Closes #947

This removes all kwargs from all policy constructors. While doing that, I also improved several names and added a whole lot of TODOs.

## Functional changes:

1. Added the possibility to pass `None` as `critic2` and `critic2_optim`. The resulting default behavior should cover the vast majority of cases.
2. Added a function called `clone_optimizer` as a temporary measure to support passing `critic2_optim=None` (a sketch of the idea follows below).

## Breaking changes:

1. `action_space` is no longer optional. In fact, it was already effectively non-optional, since a `ValueError` was raised in `BasePolicy.__init__` when it was missing. Several examples were fixed to reflect that.
2. `reward_normalization` was removed from DDPG and its children. Passing it as `True` there was never allowed; an error would have been raised in `compute_n_step_reward`. It is now removed from the interface.
3. Renamed `critic1` and similar parameters to `critic` in order to have uniform interfaces. Note that the `critic` in DDPG was optional for the sole reason that child classes used `critic1`. I removed this optionality (DDPG can't do anything with `critic=None`).
4. Several renamings of fields (mostly private to public, so backwards compatible).

## Additional changes:

1. Removed type and default declarations from docstrings. This kind of duplication is really not necessary.
2. Policy constructors are now called using named arguments only, not a fragile mixture of positional and named arguments as before.
3. Minor beautifications in typing and code.
4. Generally shortened docstrings and made them uniform across all policies (hopefully).

## Comment:

With these changes, several problems in tianshou's inheritance hierarchy become more apparent. I tried highlighting them for future work.

---------

Co-authored-by: Dominik Jain <d.jain@appliedai.de>
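As referenced above, here is a minimal sketch of how `critic2=None` and `critic2_optim=None` could be defaulted on top of a `clone_optimizer` helper. Only the names `critic2`, `critic2_optim`, and `clone_optimizer` come from this PR; the bodies below (including the `default_critic2` helper) are assumptions for illustration, not the actual implementation.

```python
import copy

import torch
from torch import nn


def clone_optimizer(optim: torch.optim.Optimizer, new_params) -> torch.optim.Optimizer:
    """Create an optimizer of the same class and hyperparameters for new parameters."""
    # Assumption: a single param group, which holds for the optimizers
    # constructed in the examples (e.g. a plain Adam over critic parameters).
    hyperparams = {k: v for k, v in optim.param_groups[0].items() if k != "params"}
    return type(optim)(new_params, **hyperparams)


def default_critic2(
    critic: nn.Module,
    critic_optim: torch.optim.Optimizer,
) -> tuple[nn.Module, torch.optim.Optimizer]:
    """Hypothetical defaulting logic for critic2=None / critic2_optim=None."""
    critic2 = copy.deepcopy(critic)  # mirror the first critic's architecture and weights
    critic2_optim = clone_optimizer(critic_optim, critic2.parameters())
    return critic2, critic2_optim
```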

The updated NPG example (Python, 162 lines, 5.9 KiB):

```python
import argparse
import os
import pprint

import gymnasium as gym
import numpy as np
import torch
from torch import nn
from torch.distributions import Independent, Normal
from torch.utils.tensorboard import SummaryWriter

from tianshou.data import Collector, VectorReplayBuffer
from tianshou.env import DummyVectorEnv
from tianshou.policy import NPGPolicy
from tianshou.trainer import OnpolicyTrainer
from tianshou.utils import TensorboardLogger
from tianshou.utils.net.common import Net
from tianshou.utils.net.continuous import ActorProb, Critic


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, default="Pendulum-v1")
    parser.add_argument("--reward-threshold", type=float, default=None)
    parser.add_argument("--seed", type=int, default=1)
    parser.add_argument("--buffer-size", type=int, default=50000)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--gamma", type=float, default=0.95)
    parser.add_argument("--epoch", type=int, default=5)
    parser.add_argument("--step-per-epoch", type=int, default=50000)
    parser.add_argument("--step-per-collect", type=int, default=2048)
    parser.add_argument("--repeat-per-collect", type=int, default=2)  # theoretically it should be 1
    parser.add_argument("--batch-size", type=int, default=99999)
    parser.add_argument("--hidden-sizes", type=int, nargs="*", default=[64, 64])
    parser.add_argument("--training-num", type=int, default=16)
    parser.add_argument("--test-num", type=int, default=10)
    parser.add_argument("--logdir", type=str, default="log")
    parser.add_argument("--render", type=float, default=0.0)
    parser.add_argument(
        "--device",
        type=str,
        default="cuda" if torch.cuda.is_available() else "cpu",
    )
    # npg special
    parser.add_argument("--gae-lambda", type=float, default=0.95)
    parser.add_argument("--rew-norm", type=int, default=1)
    parser.add_argument("--norm-adv", type=int, default=1)
    parser.add_argument("--optim-critic-iters", type=int, default=5)
    parser.add_argument("--actor-step-size", type=float, default=0.5)
    return parser.parse_known_args()[0]


def test_npg(args=get_args()):
    env = gym.make(args.task)
    args.state_shape = env.observation_space.shape or env.observation_space.n
    args.action_shape = env.action_space.shape or env.action_space.n
    args.max_action = env.action_space.high[0]
    if args.reward_threshold is None:
        default_reward_threshold = {"Pendulum-v0": -250, "Pendulum-v1": -250}
        args.reward_threshold = default_reward_threshold.get(args.task, env.spec.reward_threshold)
    # you can also use tianshou.env.SubprocVectorEnv
    # train_envs = gym.make(args.task)
    train_envs = DummyVectorEnv([lambda: gym.make(args.task) for _ in range(args.training_num)])
    # test_envs = gym.make(args.task)
    test_envs = DummyVectorEnv([lambda: gym.make(args.task) for _ in range(args.test_num)])
    # seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    train_envs.seed(args.seed)
    test_envs.seed(args.seed)
    # model
    net = Net(
        args.state_shape,
        hidden_sizes=args.hidden_sizes,
        activation=nn.Tanh,
        device=args.device,
    )
    actor = ActorProb(net, args.action_shape, unbounded=True, device=args.device).to(args.device)
    critic = Critic(
        Net(
            args.state_shape,
            hidden_sizes=args.hidden_sizes,
            device=args.device,
            activation=nn.Tanh,
        ),
        device=args.device,
    ).to(args.device)
    # orthogonal initialization
    for m in list(actor.modules()) + list(critic.modules()):
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.orthogonal_(m.weight)
            torch.nn.init.zeros_(m.bias)
    optim = torch.optim.Adam(critic.parameters(), lr=args.lr)

    # replace DiagGaussian with Independent(Normal), which is equivalent
    # pass *logits to be consistent with policy.forward
    def dist(*logits):
        return Independent(Normal(*logits), 1)

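    # As of this PR, the policy constructor is called with named arguments
    # only, and action_space is now a required argument.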
    policy = NPGPolicy(
        actor=actor,
        critic=critic,
        optim=optim,
        dist_fn=dist,
        discount_factor=args.gamma,
        reward_normalization=args.rew_norm,
        advantage_normalization=args.norm_adv,
        gae_lambda=args.gae_lambda,
        action_space=env.action_space,
        optim_critic_iters=args.optim_critic_iters,
        actor_step_size=args.actor_step_size,
        deterministic_eval=True,
    )
    # collector
    train_collector = Collector(
        policy,
        train_envs,
        VectorReplayBuffer(args.buffer_size, len(train_envs)),
    )
    test_collector = Collector(policy, test_envs)
    # log
    log_path = os.path.join(args.logdir, args.task, "npg")
    writer = SummaryWriter(log_path)
    logger = TensorboardLogger(writer)

    def save_best_fn(policy):
        torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth"))

    def stop_fn(mean_rewards):
        return mean_rewards >= args.reward_threshold

    # trainer
    result = OnpolicyTrainer(
        policy=policy,
        train_collector=train_collector,
        test_collector=test_collector,
        max_epoch=args.epoch,
        step_per_epoch=args.step_per_epoch,
        repeat_per_collect=args.repeat_per_collect,
        episode_per_test=args.test_num,
        batch_size=args.batch_size,
        step_per_collect=args.step_per_collect,
        stop_fn=stop_fn,
        save_best_fn=save_best_fn,
        logger=logger,
    ).run()
    assert stop_fn(result["best_reward"])

    if __name__ == "__main__":
        pprint.pprint(result)
        # Let's watch its performance!
        env = gym.make(args.task)
        policy.eval()
        collector = Collector(policy, env)
        result = collector.collect(n_episode=1, render=args.render)
        rews, lens = result["rews"], result["lens"]
        print(f"Final reward: {rews.mean()}, length: {lens.mean()}")


if __name__ == "__main__":
    test_npg()
```