This is the first of the 6 commits mentioned in #274, which features:

1. Refactor of the `Net` class to support any form of MLP.
2. Enable type checking in `utils.network`.
3. Related changes in docs/tests/examples.
4. Move the Atari-related networks to `examples/atari/atari_network.py`.

Co-authored-by: Trinkle23897 <trinkle23897@gmail.com>
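For reference, the refactored `Net` takes the MLP layout directly: `hidden_sizes` sets the shared body, and an optional `dueling_param` pair of keyword-argument dicts configures separate Q- and V-heads. A minimal sketch of the new call, inferred from the usage in the script below (shapes and any defaults are assumed):

    net = Net(
        state_shape,                  # e.g. env.observation_space.shape
        action_shape,                 # e.g. env.action_space.n
        hidden_sizes=[128, 128],      # one width per hidden layer, any depth
        dueling_param=(
            {"hidden_sizes": [128, 128]},  # advantage (Q) head
            {"hidden_sizes": [128, 128]},  # state-value (V) head
        ),
    )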
import argparse
import os
import pprint

import gym
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter

from tianshou.data import Collector, ReplayBuffer
from tianshou.env import DummyVectorEnv
from tianshou.policy import DQNPolicy
from tianshou.trainer import offpolicy_trainer
from tianshou.utils.net.common import Net

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, default='Acrobot-v1')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--eps-test', type=float, default=0.05)
    parser.add_argument('--eps-train', type=float, default=0.5)
    parser.add_argument('--buffer-size', type=int, default=20000)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--gamma', type=float, default=0.95)
    parser.add_argument('--n-step', type=int, default=3)
    parser.add_argument('--target-update-freq', type=int, default=320)
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--step-per-epoch', type=int, default=1000)
    parser.add_argument('--collect-per-step', type=int, default=100)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--hidden-sizes', type=int,
                        nargs='*', default=[128])
    parser.add_argument('--dueling-q-hidden-sizes', type=int,
                        nargs='*', default=[128, 128])
    parser.add_argument('--dueling-v-hidden-sizes', type=int,
                        nargs='*', default=[128, 128])
    parser.add_argument('--training-num', type=int, default=8)
    parser.add_argument('--test-num', type=int, default=100)
    parser.add_argument('--logdir', type=str, default='log')
    parser.add_argument('--render', type=float, default=0.)
    parser.add_argument(
        '--device', type=str,
        default='cuda' if torch.cuda.is_available() else 'cpu')
    return parser.parse_args()

def test_dqn(args=get_args()):
    env = gym.make(args.task)
    args.state_shape = env.observation_space.shape or env.observation_space.n
    args.action_shape = env.action_space.shape or env.action_space.n
    # train_envs = gym.make(args.task)
    # you can also use tianshou.env.SubprocVectorEnv
    train_envs = DummyVectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.training_num)])
    # test_envs = gym.make(args.task)
    test_envs = DummyVectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.test_num)])
    # seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    train_envs.seed(args.seed)
    test_envs.seed(args.seed)
    # model
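    # each dueling head is an MLP described by a dict of keyword arguments;
    # here only the layer widths are given, taken from the CLI flags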
    Q_param = {"hidden_sizes": args.dueling_q_hidden_sizes}
    V_param = {"hidden_sizes": args.dueling_v_hidden_sizes}
    net = Net(args.state_shape, args.action_shape,
              hidden_sizes=args.hidden_sizes, device=args.device,
              dueling_param=(Q_param, V_param)).to(args.device)
    optim = torch.optim.Adam(net.parameters(), lr=args.lr)
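    # DQNPolicy handles n-step return bootstrapping (args.n_step) and syncs
    # the target network every args.target_update_freq parameter updates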
    policy = DQNPolicy(
        net, optim, args.gamma, args.n_step,
        target_update_freq=args.target_update_freq)
    # collector
    train_collector = Collector(
        policy, train_envs, ReplayBuffer(args.buffer_size))
    test_collector = Collector(policy, test_envs)
    # policy.set_eps(1)
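    # warm-up: pre-collect a few transitions so the replay buffer is not
    # empty when the first gradient update samples from it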
    train_collector.collect(n_step=args.batch_size)
    # log
    log_path = os.path.join(args.logdir, args.task, 'dqn')
    writer = SummaryWriter(log_path)

    def save_fn(policy):
        torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))

    def stop_fn(mean_rewards):
        return mean_rewards >= env.spec.reward_threshold
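
    # exploration schedule: hold eps at eps_train for the first 100k env
    # steps, then anneal linearly down to half of eps_train by step 500k
    # (e.g. with eps_train=0.5, step 300k gives 0.5 - 0.5 * 0.25 = 0.375)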
    def train_fn(epoch, env_step):
        if env_step <= 100000:
            policy.set_eps(args.eps_train)
        elif env_step <= 500000:
            eps = args.eps_train - (env_step - 100000) / \
                400000 * (0.5 * args.eps_train)
            policy.set_eps(eps)
        else:
            policy.set_eps(0.5 * args.eps_train)

    def test_fn(epoch, env_step):
        policy.set_eps(args.eps_test)

    # trainer
    result = offpolicy_trainer(
        policy, train_collector, test_collector, args.epoch,
        args.step_per_epoch, args.collect_per_step, args.test_num,
        args.batch_size, train_fn=train_fn, test_fn=test_fn,
        stop_fn=stop_fn, save_fn=save_fn, writer=writer)

    assert stop_fn(result['best_reward'])

    if __name__ == '__main__':
        pprint.pprint(result)
        # Let's watch its performance!
        policy.eval()
        policy.set_eps(args.eps_test)
        test_envs.seed(args.seed)
        test_collector.reset()
        result = test_collector.collect(n_episode=[1] * args.test_num,
                                        render=args.render)
        print(f'Final reward: {result["rew"]}, length: {result["len"]}')


if __name__ == '__main__':
    test_dqn(get_args())
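Because the layer widths are plain CLI flags, the MLP shape can be changed without touching the code, e.g. (assuming the hypothetical filename `test_dqn.py`): `python test_dqn.py --hidden-sizes 256 256 --dueling-q-hidden-sizes 128`.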