# Tianshou/test/continuous/test_sac_with_il.py

import argparse
import os

import gymnasium as gym
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter

from tianshou.data import Collector, VectorReplayBuffer
from tianshou.env import DummyVectorEnv
from tianshou.policy import ImitationPolicy, SACPolicy
from tianshou.policy.base import BasePolicy
from tianshou.trainer import OffpolicyTrainer
from tianshou.utils import TensorboardLogger
from tianshou.utils.net.common import Net
from tianshou.utils.net.continuous import Actor, ActorProb, Critic
from tianshou.utils.space_info import SpaceInfo
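
# envpool is an optional dependency; it is only used by the commented-out
# env construction inside test_sac_with_il below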
try:
import envpool
except ImportError:
envpool = None


def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="Pendulum-v1")
parser.add_argument("--reward-threshold", type=float, default=None)
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--buffer-size", type=int, default=20000)
parser.add_argument("--actor-lr", type=float, default=1e-3)
parser.add_argument("--critic-lr", type=float, default=1e-3)
parser.add_argument("--il-lr", type=float, default=1e-3)
parser.add_argument("--gamma", type=float, default=0.99)
parser.add_argument("--tau", type=float, default=0.005)
parser.add_argument("--alpha", type=float, default=0.2)
parser.add_argument("--auto-alpha", type=int, default=1)
parser.add_argument("--alpha-lr", type=float, default=3e-4)
parser.add_argument("--epoch", type=int, default=10)
parser.add_argument("--step-per-epoch", type=int, default=24000)
parser.add_argument("--il-step-per-epoch", type=int, default=500)
parser.add_argument("--step-per-collect", type=int, default=10)
parser.add_argument("--update-per-step", type=float, default=0.1)
parser.add_argument("--batch-size", type=int, default=128)
parser.add_argument("--hidden-sizes", type=int, nargs="*", default=[128, 128])
parser.add_argument("--imitation-hidden-sizes", type=int, nargs="*", default=[128, 128])
parser.add_argument("--training-num", type=int, default=10)
parser.add_argument("--test-num", type=int, default=100)
parser.add_argument("--logdir", type=str, default="log")
parser.add_argument("--render", type=float, default=0.0)
parser.add_argument("--n-step", type=int, default=3)
parser.add_argument(
"--device",
type=str,
default="cuda" if torch.cuda.is_available() else "cpu",
)
return parser.parse_known_args()[0]


def test_sac_with_il(args: argparse.Namespace = get_args()) -> None:
    # envpool-based env construction (kept for reference); this test uses Python vector envs instead
    # train_envs = env = envpool.make_gymnasium(args.task, num_envs=args.training_num, seed=args.seed)
    # test_envs = envpool.make_gymnasium(args.task, num_envs=args.test_num, seed=args.seed)
env = gym.make(args.task)
train_envs = DummyVectorEnv([lambda: gym.make(args.task) for _ in range(args.training_num)])
test_envs = DummyVectorEnv([lambda: gym.make(args.task) for _ in range(args.test_num)])
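    # DummyVectorEnv runs the env factories sequentially in the current process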
space_info = SpaceInfo.from_env(env)
args.state_shape = space_info.observation_info.obs_shape
args.action_shape = space_info.action_info.action_shape
args.max_action = space_info.action_info.max_action
if args.reward_threshold is None:
default_reward_threshold = {"Pendulum-v0": -250, "Pendulum-v1": -250}
args.reward_threshold = default_reward_threshold.get(
args.task,
env.spec.reward_threshold if env.spec else None,
)
# you can also use tianshou.env.SubprocVectorEnv
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_envs.seed(args.seed)
test_envs.seed(args.seed + args.training_num)
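    # test env seeds are offset by training_num so they do not overlap the training seeds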
# model
net = Net(state_shape=args.state_shape, hidden_sizes=args.hidden_sizes, device=args.device)
actor = ActorProb(net, args.action_shape, device=args.device, unbounded=True).to(args.device)
actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr)
net_c1 = Net(
state_shape=args.state_shape,
action_shape=args.action_shape,
hidden_sizes=args.hidden_sizes,
concat=True,
device=args.device,
)
critic1 = Critic(net_c1, device=args.device).to(args.device)
critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr)
net_c2 = Net(
state_shape=args.state_shape,
action_shape=args.action_shape,
hidden_sizes=args.hidden_sizes,
concat=True,
device=args.device,
)
critic2 = Critic(net_c2, device=args.device).to(args.device)
critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr)
action_dim = space_info.action_info.action_dim
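    # with --auto-alpha, SACPolicy receives a (target_entropy, log_alpha, optimizer) tuple
    # instead of a fixed float and tunes the entropy coefficient during training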
if args.auto_alpha:
target_entropy = -action_dim
log_alpha = torch.zeros(1, requires_grad=True, device=args.device)
alpha_optim = torch.optim.Adam([log_alpha], lr=args.alpha_lr)
args.alpha = (target_entropy, log_alpha, alpha_optim)
policy: SACPolicy = SACPolicy(
actor=actor,
actor_optim=actor_optim,
critic=critic1,
critic_optim=critic1_optim,
critic2=critic2,
critic2_optim=critic2_optim,
tau=args.tau,
gamma=args.gamma,
alpha=args.alpha,
estimation_step=args.n_step,
action_space=env.action_space,
)
# collector
train_collector = Collector(
policy,
train_envs,
VectorReplayBuffer(args.buffer_size, len(train_envs)),
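        # exploration_noise=True lets the collector apply the policy's exploration noise to actions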
exploration_noise=True,
)
test_collector = Collector(policy, test_envs)
# train_collector.collect(n_step=args.buffer_size)
# log
log_path = os.path.join(args.logdir, args.task, "sac")
writer = SummaryWriter(log_path)
logger = TensorboardLogger(writer)

    def save_best_fn(policy: BasePolicy) -> None:
torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth"))

    def stop_fn(mean_rewards: float) -> bool:
return mean_rewards >= args.reward_threshold
# trainer
result = OffpolicyTrainer(
policy=policy,
train_collector=train_collector,
test_collector=test_collector,
max_epoch=args.epoch,
step_per_epoch=args.step_per_epoch,
step_per_collect=args.step_per_collect,
episode_per_test=args.test_num,
batch_size=args.batch_size,
update_per_step=args.update_per_step,
stop_fn=stop_fn,
save_best_fn=save_best_fn,
logger=logger,
).run()
assert stop_fn(result.best_reward)
# here we define an imitation collector with a trivial policy
if args.task.startswith("Pendulum"):
args.reward_threshold -= 50 # lower the goal
il_net = Net(
args.state_shape,
hidden_sizes=args.imitation_hidden_sizes,
device=args.device,
)
il_actor = Actor(
il_net,
args.action_shape,
max_action=args.max_action,
device=args.device,
).to(args.device)
optim = torch.optim.Adam(il_actor.parameters(), lr=args.il_lr)
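    # ImitationPolicy performs behavior cloning: it regresses the actor onto the actions
    # stored in the replay buffer; action scaling/clipping keeps outputs within the env bounds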
il_policy: ImitationPolicy = ImitationPolicy(
actor=il_actor,
optim=optim,
action_space=env.action_space,
action_scaling=True,
action_bound_method="clip",
)
il_test_env = gym.make(args.task)
il_test_env.reset(seed=args.seed + args.training_num + args.test_num)
il_test_collector = Collector(
il_policy,
# envpool.make_gymnasium(args.task, num_envs=args.test_num, seed=args.seed),
il_test_env,
)
train_collector.reset()
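    # train_collector still wraps the trained SAC policy, so it keeps filling the buffer
    # with expert transitions for the imitation policy to clone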
result = OffpolicyTrainer(
policy=il_policy,
train_collector=train_collector,
test_collector=il_test_collector,
max_epoch=args.epoch,
step_per_epoch=args.il_step_per_epoch,
step_per_collect=args.step_per_collect,
episode_per_test=args.test_num,
batch_size=args.batch_size,
stop_fn=stop_fn,
save_best_fn=save_best_fn,
logger=logger,
).run()
assert stop_fn(result.best_reward)


if __name__ == "__main__":
test_sac_with_il()