Update Multi-agent RL docs, upgrade pettingzoo (#595)
* update multi-agent docs, upgrade pettingzoo
* avoid pettingzoo deprecation warning
* fix pistonball tests
* codestyle
This commit is contained in: parent 18277497ed, commit 6fc6857812
@ -10,47 +10,47 @@ In this section, we describe how to use Tianshou to implement multi-agent reinfo
|
||||
Tic-Tac-Toe Environment
|
||||
-----------------------
|
||||
|
||||
The scripts are located at ``test/multiagent/``. We have implemented a Tic-Tac-Toe environment inherit the :class:`~tianshou.env.MultiAgentEnv` that supports Tic-Tac-Toe of any scale. Let's first explore the environment. The 3x3 Tic-Tac-Toe is too easy, so we will focus on 6x6 Tic-Tac-Toe where 4 same signs in a row are considered to win.
|
||||
The scripts are located at ``test/pettingzoo/``. We have implemented :class:`~tianshou.env.PettingZooEnv` which can wrap any `PettingZoo <https://www.pettingzoo.ml/>`_ environment. PettingZoo offers a 3x3 Tic-Tac-Toe environment; let's explore it first.
|
||||
::
|
||||
|
||||
>>> from tic_tac_toe_env import TicTacToeEnv # the module tic_tac_toe_env is in test/multiagent/
|
||||
>>> board_size = 6 # the size of board size
|
||||
>>> win_size = 4 # how many signs in a row are considered to win
|
||||
>>>
|
||||
>>> # This board has 6 rows and 6 cols (36 places in total)
|
||||
>>> from tianshou.env import PettingZooEnv # wrapper for PettingZoo environments
|
||||
>>> from pettingzoo.classic import tictactoe_v3 # the Tic-Tac-Toe environment to be wrapped
|
||||
>>> # This board has 3 rows and 3 cols (9 places in total)
|
||||
>>> # Players place 'x' and 'o' in turn on the board
|
||||
>>> # The player who first gets 4 consecutive 'x's or 'o's wins
|
||||
>>> # The player who first gets 3 consecutive 'x's or 'o's wins
|
||||
>>>
|
||||
>>> env = TicTacToeEnv(size=board_size, win_size=win_size)
|
||||
>>> env = PettingZooEnv(tictactoe_v3.env())
|
||||
>>> obs = env.reset()
|
||||
>>> env.render() # render the empty board
|
||||
board (step 0):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
=================
|
||||
| |
|
||||
- | - | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
| |
|
||||
>>> print(obs) # let's see the shape of the observation
|
||||
{'agent_id': 1,
|
||||
'obs': array([[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0]], dtype=int32),
|
||||
'mask': array([ True, True, True, True, True, True, True, True, True,
|
||||
True, True, True, True, True, True, True, True, True,
|
||||
True, True, True, True, True, True, True, True, True,
|
||||
True, True, True, True, True, True, True, True, True])}
|
||||
{'agent_id': 'player_1', 'obs': array([[[0, 0],
|
||||
[0, 0],
|
||||
[0, 0]],
|
||||
|
||||
[[0, 0],
|
||||
[0, 0],
|
||||
[0, 0]],
|
||||
|
||||
[[0, 0],
|
||||
[0, 0],
|
||||
[0, 0]]], dtype=int8), 'mask': [True, True, True, True, True, True, True, True, True]}
|
||||
|
||||
|
||||
The observation variable ``obs`` returned from the environment is a ``dict`` with three keys: ``agent_id``, ``obs``, and ``mask``. This is a general structure in multi-agent RL where agents take turns. The meanings of these keys are:
|
||||
|
||||
- ``agent_id``: the id of the current acting agent, where agent_id :math:`\in [1, N]`, N is the number of agents. In our Tic-Tac-Toe case, N is 2. The agent_id starts at 1 because we reserve 0 for the environment itself. Sometimes the developer may want to control the behavior of the environment, for example, to determine how to dispatch cards in Poker.
|
||||
- ``agent_id``: the id of the current acting agent. In our Tic-Tac-Toe case, the agent_id can be ``player_1`` or ``player_2``.
|
||||
|
||||
- ``obs``: the actual observation of the environment. In the Tic-Tac-Toe game above, the observation variable ``obs`` is a ``np.ndarray`` with the shape of (6, 6). The values can be "0/1/-1": 0 for empty, 1 for ``x``, -1 for ``o``. Agent 1 places ``x`` on the board, while agent 2 places ``o`` on the board.
|
||||
- ``obs``: the actual observation of the environment. In the Tic-Tac-Toe game above, the observation variable ``obs`` is a ``np.ndarray`` with the shape of (3, 3, 2). For ``player_1``, the first 3x3 plane represents the placement of Xs, and the second plane shows the placement of Os. The possible values for each cell are 0 or 1; in the first plane, 1 indicates that an X has been placed in that cell, and 0 indicates that X is not in that cell. Similarly, in the second plane, 1 indicates that an O has been placed in that cell, while 0 indicates that an O has not been placed. For ``player_2``, the observation is the same, but Xs and Os swap positions, so Os are encoded in plane 1 and Xs in plane 2.
|
||||
|
||||
- ``mask``: the action mask for the current timestep. In board games or card games, the set of legal actions varies over time. The mask is a boolean array. For Tic-Tac-Toe, index ``i`` corresponds to the cell in row ``i // N`` and column ``i % N``. If ``mask[i] == True``, the player can place an ``x`` or ``o`` at that position. Since the board is currently empty, the mask is all ``True``, covering every position on the board (a short sketch of using the mask follows right after this list).
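To make the ``mask`` concrete, here is a minimal sketch (not part of the original tutorial) that combines the two observation planes into one signed board and samples a random legal move from the mask; it assumes the ``obs`` dict returned by ``env.reset()`` above:

::

    >>> import numpy as np
    >>> # +1 where the acting player's pieces are, -1 for the opponent's, 0 for empty
    >>> board = obs['obs'][..., 0].astype(int) - obs['obs'][..., 1].astype(int)
    >>> board.shape
    (3, 3)
    >>> # a random legal move: sample only among indices where the mask is True
    >>> legal_actions = np.flatnonzero(obs['mask'])
    >>> action = int(np.random.choice(legal_actions))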
|
||||
|
||||
@ -66,17 +66,17 @@ Let's play two steps to have an intuitive understanding of the environment.
|
||||
>>> action = 0 # action is either an integer, or an np.ndarray with one element
|
||||
>>> obs, reward, done, info = env.step(action) # the env.step follows the api of OpenAI Gym
|
||||
>>> print(obs) # notice the change in the observation
|
||||
{'agent_id': 2,
|
||||
'obs': array([[1, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0]], dtype=int32),
|
||||
'mask': array([False, True, True, True, True, True, True, True, True,
|
||||
True, True, True, True, True, True, True, True, True,
|
||||
True, True, True, True, True, True, True, True, True,
|
||||
True, True, True, True, True, True, True, True, True])}}
|
||||
{'agent_id': 'player_2', 'obs': array([[[0, 1],
|
||||
[0, 0],
|
||||
[0, 0]],
|
||||
|
||||
[[0, 0],
|
||||
[0, 0],
|
||||
[0, 0]],
|
||||
|
||||
[[0, 0],
|
||||
[0, 0],
|
||||
[0, 0]]], dtype=int8), 'mask': [False, True, True, True, True, True, True, True, True]}
|
||||
>>> # reward has two items, one for each player: 1 for a win, -1 for a loss, and 0 otherwise
|
||||
>>> print(reward)
|
||||
[0. 0.]
|
||||
@ -89,26 +89,26 @@ Let's play two steps to have an intuitive understanding of the environment.
|
||||
One case worth noting is that the game ends when only one empty position remains, rather than when none remains, because at that point the player has only a single (i.e., forced) choice.
|
||||
::
|
||||
|
||||
>>> # omitted actions: 6, 1, 7, 2, 8
|
||||
>>> obs, reward, done, info = env.step(3) # player 1 wins
|
||||
>>> # omitted actions: 3, 1, 4
|
||||
>>> obs, reward, done, info = env.step(2) # player_1 wins
|
||||
>>> print((reward, done))
|
||||
(array([ 1., -1.], dtype=float32), array(True))
|
||||
>>> env.render() # 'X' and 'O' indicate the last action
|
||||
board (step 7):
|
||||
=================
|
||||
===x x x X _ _===
|
||||
===o o o _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
=================
|
||||
([1, -1], True)
|
||||
>>> env.render()
|
||||
| |
|
||||
X | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | - | -
|
||||
| |
|
||||
|
||||
Now that we are familiar with the environment, let's first try playing with random agents!
|
||||
|
||||
|
||||
Two Random Agent
|
||||
----------------
|
||||
Two Random Agents
|
||||
-----------------
|
||||
|
||||
.. sidebar:: The relationship between MultiAgentPolicyManager (Manager) and BasePolicy (Agent)
|
||||
|
||||
@ -119,12 +119,16 @@ Tianshou already provides some builtin classes for multi-agent learning. You can
|
||||
::
|
||||
|
||||
>>> from tianshou.data import Collector
|
||||
>>> from tianshou.env import DummyVectorEnv
|
||||
>>> from tianshou.policy import RandomPolicy, MultiAgentPolicyManager
|
||||
>>>
|
||||
>>> # agents should be wrapped into one policy,
|
||||
>>> # which is responsible for calling the acting agent correctly
|
||||
>>> # here we use two random agents
|
||||
>>> policy = MultiAgentPolicyManager([RandomPolicy(), RandomPolicy()])
|
||||
>>> policy = MultiAgentPolicyManager([RandomPolicy(), RandomPolicy()], env)
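>>> # (added note) the second argument lets the manager bind each sub-policy to one
>>> # of the environment's agent names (env.agents); individual policies are later
>>> # looked up by that name, e.g. policy.policies['player_1']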
|
||||
>>>
|
||||
>>> # need to vectorize the environment for the collector
|
||||
>>> env = DummyVectorEnv([lambda: env])
|
||||
>>>
|
||||
>>> # use collectors to collect an episode of trajectories
|
||||
>>> # the reward is a vector, so we need a scalar metric to monitor the training
|
||||
@ -133,33 +137,33 @@ Tianshou already provides some builtin classes for multi-agent learning. You can
|
||||
>>> # you will see a long trajectory showing the board status at each timestep
|
||||
>>> result = collector.collect(n_episode=1, render=.1)
|
||||
(only the last 3 steps are shown)
|
||||
board (step 20):
|
||||
=================
|
||||
===o x _ o o o===
|
||||
===_ _ x _ _ x===
|
||||
===x _ o o x _===
|
||||
===O _ o o x _===
|
||||
===x _ o _ _ _===
|
||||
===x _ _ _ x x===
|
||||
=================
|
||||
board (step 21):
|
||||
=================
|
||||
===o x _ o o o===
|
||||
===_ _ x _ _ x===
|
||||
===x _ o o x _===
|
||||
===o _ o o x _===
|
||||
===x _ o X _ _===
|
||||
===x _ _ _ x x===
|
||||
=================
|
||||
board (step 22):
|
||||
=================
|
||||
===o x _ o o o===
|
||||
===_ O x _ _ x===
|
||||
===x _ o o x _===
|
||||
===o _ o o x _===
|
||||
===x _ o x _ _===
|
||||
===x _ _ _ x x===
|
||||
=================
|
||||
| |
|
||||
X | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
O | - | -
|
||||
| |
|
||||
| |
|
||||
X | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
O | - | O
|
||||
| |
|
||||
| |
|
||||
X | X | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
O | - | O
|
||||
| |
|
||||
|
||||
Random agents perform badly. In the game above, although agent 2 eventually wins, it is clear that a smart agent 1 would have placed an ``x`` at row 4 col 4 to win directly.
|
||||
|
||||
@ -170,62 +174,99 @@ Train an MARL Agent
|
||||
So let's start to train our Tic-Tac-Toe agent! First, import some required modules.
|
||||
::
|
||||
|
||||
import os
|
||||
import torch
|
||||
import argparse
|
||||
import numpy as np
|
||||
import os
|
||||
from copy import deepcopy
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import gym
|
||||
import numpy as np
|
||||
import torch
|
||||
from pettingzoo.classic import tictactoe_v3
|
||||
from torch.utils.tensorboard import SummaryWriter
|
||||
from tianshou.utils import TensorboardLogger
|
||||
|
||||
from tianshou.env import DummyVectorEnv
|
||||
from tianshou.utils.net.common import Net
|
||||
from tianshou.trainer import offpolicy_trainer
|
||||
from tianshou.data import Collector, VectorReplayBuffer
|
||||
from tianshou.policy import BasePolicy, RandomPolicy, DQNPolicy, MultiAgentPolicyManager
|
||||
|
||||
from tic_tac_toe_env import TicTacToeEnv
|
||||
from tianshou.env import DummyVectorEnv
|
||||
from tianshou.env.pettingzoo_env import PettingZooEnv
|
||||
from tianshou.policy import (
|
||||
BasePolicy,
|
||||
DQNPolicy,
|
||||
MultiAgentPolicyManager,
|
||||
RandomPolicy,
|
||||
)
|
||||
from tianshou.trainer import offpolicy_trainer
|
||||
from tianshou.utils import TensorboardLogger
|
||||
from tianshou.utils.net.common import Net
|
||||
|
||||
The explanation of each Tianshou class/function is deferred to its first usage. Here we define some arguments and hyperparameters for the experiment. The meaning of each argument should be clear from its name.
|
||||
::
|
||||
|
||||
def get_args():
|
||||
def get_parser() -> argparse.ArgumentParser:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--seed', type=int, default=1626)
|
||||
parser.add_argument('--eps-test', type=float, default=0.05)
|
||||
parser.add_argument('--eps-train', type=float, default=0.1)
|
||||
parser.add_argument('--buffer-size', type=int, default=20000)
|
||||
parser.add_argument('--lr', type=float, default=1e-3)
|
||||
parser.add_argument('--gamma', type=float, default=0.9,
|
||||
help='a smaller gamma favors earlier win')
|
||||
parser.add_argument('--lr', type=float, default=1e-4)
|
||||
parser.add_argument(
|
||||
'--gamma', type=float, default=0.9, help='a smaller gamma favors earlier win'
|
||||
)
|
||||
parser.add_argument('--n-step', type=int, default=3)
|
||||
parser.add_argument('--target-update-freq', type=int, default=320)
|
||||
parser.add_argument('--epoch', type=int, default=20)
|
||||
parser.add_argument('--step-per-epoch', type=int, default=5000)
|
||||
parser.add_argument('--epoch', type=int, default=50)
|
||||
parser.add_argument('--step-per-epoch', type=int, default=1000)
|
||||
parser.add_argument('--step-per-collect', type=int, default=10)
|
||||
parser.add_argument('--update-per-step', type=float, default=0.1)
|
||||
parser.add_argument('--batch-size', type=int, default=64)
|
||||
parser.add_argument('--hidden-sizes', type=int,
|
||||
nargs='*', default=[128, 128, 128, 128])
|
||||
parser.add_argument(
|
||||
'--hidden-sizes', type=int, nargs='*', default=[128, 128, 128, 128]
|
||||
)
|
||||
parser.add_argument('--training-num', type=int, default=10)
|
||||
parser.add_argument('--test-num', type=int, default=100)
|
||||
parser.add_argument('--test-num', type=int, default=10)
|
||||
parser.add_argument('--logdir', type=str, default='log')
|
||||
parser.add_argument('--render', type=float, default=0.1)
|
||||
parser.add_argument('--board-size', type=int, default=6)
|
||||
parser.add_argument('--win-size', type=int, default=4)
|
||||
parser.add_argument('--win-rate', type=float, default=0.9,
|
||||
help='the expected winning rate')
|
||||
parser.add_argument('--watch', default=False, action='store_true',
|
||||
help='no training, watch the play of pre-trained models')
|
||||
parser.add_argument('--agent-id', type=int, default=2,
|
||||
help='the learned agent plays as the agent_id-th player. Choices are 1 and 2.')
|
||||
parser.add_argument('--resume-path', type=str, default='',
|
||||
help='the path of agent pth file for resuming from a pre-trained agent')
|
||||
parser.add_argument('--opponent-path', type=str, default='',
|
||||
help='the path of opponent agent pth file for resuming from a pre-trained agent')
|
||||
parser.add_argument('--device', type=str,
|
||||
default='cuda' if torch.cuda.is_available() else 'cpu')
|
||||
return parser.parse_args()
|
||||
parser.add_argument(
|
||||
'--win-rate',
|
||||
type=float,
|
||||
default=0.6,
|
||||
help='the expected winning rate: Optimal policy can get 0.7'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--watch',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='no training, '
|
||||
'watch the play of pre-trained models'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--agent-id',
|
||||
type=int,
|
||||
default=2,
|
||||
help='the learned agent plays as the'
|
||||
' agent_id-th player. Choices are 1 and 2.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--resume-path',
|
||||
type=str,
|
||||
default='',
|
||||
help='the path of agent pth file '
|
||||
'for resuming from a pre-trained agent'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--opponent-path',
|
||||
type=str,
|
||||
default='',
|
||||
help='the path of opponent agent pth file '
|
||||
'for resuming from a pre-trained agent'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu'
|
||||
)
|
||||
return parser
|
||||
|
||||
def get_args() -> argparse.Namespace:
|
||||
parser = get_parser()
|
||||
return parser.parse_known_args()[0]
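As a quick aside on the ``--gamma`` help text above ("a smaller gamma favors earlier win"): the discounted return of a win that arrives after ``t`` of the agent's moves is roughly ``gamma ** t``, so with ``gamma < 1`` an earlier win is worth more. A tiny illustration, not part of the original script:

::

    >>> gamma = 0.9
    >>> # a win on the agent's 3rd move vs. its 7th move: the earlier win has the
    >>> # larger discounted return, so the agent is nudged towards finishing quickly
    >>> round(gamma ** 3, 3), round(gamma ** 7, 3)
    (0.729, 0.478)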
|
||||
|
||||
.. sidebar:: The relationship between MultiAgentPolicyManager (Manager) and BasePolicy (Agent)
|
||||
|
||||
@ -243,24 +284,34 @@ Here it is:
|
||||
::
|
||||
|
||||
def get_agents(
|
||||
args=get_args(),
|
||||
agent_learn=None, # BasePolicy
|
||||
agent_opponent=None, # BasePolicy
|
||||
optim=None, # torch.optim.Optimizer
|
||||
): # return a tuple of (BasePolicy, torch.optim.Optimizer)
|
||||
|
||||
env = TicTacToeEnv(args.board_size, args.win_size)
|
||||
args.state_shape = env.observation_space.shape or env.observation_space.n
|
||||
args: argparse.Namespace = get_args(),
|
||||
agent_learn: Optional[BasePolicy] = None,
|
||||
agent_opponent: Optional[BasePolicy] = None,
|
||||
optim: Optional[torch.optim.Optimizer] = None,
|
||||
) -> Tuple[BasePolicy, torch.optim.Optimizer, list]:
|
||||
env = get_env()
|
||||
observation_space = env.observation_space['observation'] if isinstance(
|
||||
env.observation_space, gym.spaces.Dict
|
||||
) else env.observation_space
|
||||
args.state_shape = observation_space.shape or observation_space.n
|
||||
args.action_shape = env.action_space.shape or env.action_space.n
|
||||
|
||||
if agent_learn is None:
|
||||
net = Net(args.state_shape, args.action_shape,
|
||||
hidden_sizes=args.hidden_sizes, device=args.device).to(args.device)
|
||||
# model
|
||||
net = Net(
|
||||
args.state_shape,
|
||||
args.action_shape,
|
||||
hidden_sizes=args.hidden_sizes,
|
||||
device=args.device
|
||||
).to(args.device)
|
||||
if optim is None:
|
||||
optim = torch.optim.Adam(net.parameters(), lr=args.lr)
|
||||
agent_learn = DQNPolicy(
|
||||
net, optim, args.gamma, args.n_step,
|
||||
target_update_freq=args.target_update_freq)
|
||||
net,
|
||||
optim,
|
||||
args.gamma,
|
||||
args.n_step,
|
||||
target_update_freq=args.target_update_freq
|
||||
)
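# (added note) Net is a plain MLP here: it flattens the (3, 3, 2) board observation
# and outputs one Q-value per action (the 9 board cells), which DQNPolicy consumes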
|
||||
if args.resume_path:
|
||||
agent_learn.load_state_dict(torch.load(args.resume_path))
|
||||
|
||||
@ -275,35 +326,27 @@ Here it is:
|
||||
agents = [agent_learn, agent_opponent]
|
||||
else:
|
||||
agents = [agent_opponent, agent_learn]
|
||||
policy = MultiAgentPolicyManager(agents)
|
||||
return policy, optim
|
||||
policy = MultiAgentPolicyManager(agents, env)
|
||||
return policy, optim, env.agents
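A brief aside on the ``observation_space`` handling above: because PettingZoo's Tic-Tac-Toe ships an action mask alongside the board, the wrapped environment exposes a ``gym.spaces.Dict`` whose ``'observation'`` entry is the tensor the network actually consumes, which is why ``get_agents`` unwraps it before reading its shape. A small sketch, not part of the original tutorial, with the shapes shown earlier:

::

    >>> env = PettingZooEnv(tictactoe_v3.env())
    >>> isinstance(env.observation_space, gym.spaces.Dict)
    True
    >>> env.observation_space['observation'].shape  # matches obs['obs'] above
    (3, 3, 2)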
|
||||
|
||||
With the above preparation, we are close to obtaining our first trained agent. The following code is almost the same as the code in the DQN tutorial.
|
||||
|
||||
::
|
||||
|
||||
args = get_args()
|
||||
def get_env():
|
||||
return PettingZooEnv(tictactoe_v3.env())
|
||||
|
||||
# ======== a test function that tests a pre-trained agent and exit ======
|
||||
def watch(args=get_args(),
|
||||
agent_learn=None, # BasePolicy
|
||||
agent_opponent=None): # BasePolicy
|
||||
env = TicTacToeEnv(args.board_size, args.win_size)
|
||||
policy, optim = get_agents(
|
||||
args, agent_learn=agent_learn, agent_opponent=agent_opponent)
|
||||
policy.eval()
|
||||
policy.policies[args.agent_id - 1].set_eps(args.eps_test)
|
||||
collector = Collector(policy, env)
|
||||
result = collector.collect(n_episode=1, render=args.render)
|
||||
print(f'Final reward: {result["rews"][:, args.agent_id - 1].mean()}, length: {result["lens"].mean()}')
|
||||
if args.watch:
|
||||
watch(args)
|
||||
exit(0)
|
||||
|
||||
def train_agent(
|
||||
args: argparse.Namespace = get_args(),
|
||||
agent_learn: Optional[BasePolicy] = None,
|
||||
agent_opponent: Optional[BasePolicy] = None,
|
||||
optim: Optional[torch.optim.Optimizer] = None,
|
||||
) -> Tuple[dict, BasePolicy]:
|
||||
|
||||
# ======== environment setup =========
|
||||
env_func = lambda: TicTacToeEnv(args.board_size, args.win_size)
|
||||
train_envs = DummyVectorEnv([env_func for _ in range(args.training_num)])
|
||||
test_envs = DummyVectorEnv([env_func for _ in range(args.test_num)])
|
||||
train_envs = DummyVectorEnv([get_env for _ in range(args.training_num)])
|
||||
test_envs = DummyVectorEnv([get_env for _ in range(args.test_num)])
|
||||
# seed
|
||||
np.random.seed(args.seed)
|
||||
torch.manual_seed(args.seed)
|
||||
@ -311,12 +354,19 @@ With the above preparation, we are close to the first learned agent. The followi
|
||||
test_envs.seed(args.seed)
|
||||
|
||||
# ======== agent setup =========
|
||||
policy, optim = get_agents()
|
||||
policy, optim, agents = get_agents(
|
||||
args, agent_learn=agent_learn, agent_opponent=agent_opponent, optim=optim
|
||||
)
|
||||
|
||||
# ======== collector setup =========
|
||||
buffer = VectorReplayBuffer(args.buffer_size, args.training_num)
|
||||
train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)
|
||||
train_collector = Collector(
|
||||
policy,
|
||||
train_envs,
|
||||
VectorReplayBuffer(args.buffer_size, len(train_envs)),
|
||||
exploration_noise=True
|
||||
)
|
||||
test_collector = Collector(policy, test_envs, exploration_noise=True)
|
||||
# policy.set_eps(1)
|
||||
train_collector.collect(n_step=args.batch_size * args.training_num)
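# (added note) pre-collect some transitions so the replay buffer is not empty
# before the first gradient updates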
|
||||
|
||||
# ======== tensorboard logging setup =========
|
||||
@ -326,44 +376,71 @@ With the above preparation, we are close to the first learned agent. The followi
|
||||
logger = TensorboardLogger(writer)
|
||||
|
||||
# ======== callback functions used during training =========
|
||||
|
||||
def save_best_fn(policy):
|
||||
if hasattr(args, 'model_save_path'):
|
||||
model_save_path = args.model_save_path
|
||||
else:
|
||||
model_save_path = os.path.join(
|
||||
args.logdir, 'tic_tac_toe', 'dqn', 'policy.pth')
|
||||
args.logdir, 'tic_tac_toe', 'dqn', 'policy.pth'
|
||||
)
|
||||
torch.save(
|
||||
policy.policies[args.agent_id - 1].state_dict(),
|
||||
model_save_path)
|
||||
policy.policies[agents[args.agent_id - 1]].state_dict(), model_save_path
|
||||
)
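# (added note) only the learning agent's parameters are saved here, not the
# whole MultiAgentPolicyManager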
|
||||
|
||||
def stop_fn(mean_rewards):
|
||||
return mean_rewards >= args.win_rate # 95% winning rate by default
|
||||
# the default args.win_rate is 0.6, but the reward is in [-1, 1]
# instead of [0, 1], so args.win_rate == 0.6 corresponds to roughly an 80% win rate.
|
||||
return mean_rewards >= args.win_rate
|
||||
|
||||
def train_fn(epoch, env_step):
|
||||
policy.policies[args.agent_id - 1].set_eps(args.eps_train)
|
||||
policy.policies[agents[args.agent_id - 1]].set_eps(args.eps_train)
|
||||
|
||||
def test_fn(epoch, env_step):
|
||||
policy.policies[args.agent_id - 1].set_eps(args.eps_test)
|
||||
policy.policies[agents[args.agent_id - 1]].set_eps(args.eps_test)
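# (added note) train_fn/test_fn adjust only the learning agent's exploration
# epsilon; the opponent policy is left untouched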
|
||||
|
||||
# the reward is a vector, so we need a scalar metric to monitor the training.
|
||||
# we choose the reward of the learning agent
|
||||
def reward_metric(rews):
|
||||
return rews[:, args.agent_id - 1]
|
||||
|
||||
# start training, this may require about three minutes
|
||||
# trainer
|
||||
result = offpolicy_trainer(
|
||||
policy, train_collector, test_collector, args.epoch,
|
||||
args.step_per_epoch, args.step_per_collect, args.test_num,
|
||||
args.batch_size, train_fn=train_fn, test_fn=test_fn,
|
||||
stop_fn=stop_fn, save_best_fn=save_best_fn,
|
||||
update_per_step=args.update_per_step, logger=logger,
|
||||
test_in_train=False, reward_metric=reward_metric)
|
||||
policy,
|
||||
train_collector,
|
||||
test_collector,
|
||||
args.epoch,
|
||||
args.step_per_epoch,
|
||||
args.step_per_collect,
|
||||
args.test_num,
|
||||
args.batch_size,
|
||||
train_fn=train_fn,
|
||||
test_fn=test_fn,
|
||||
stop_fn=stop_fn,
|
||||
save_best_fn=save_best_fn,
|
||||
update_per_step=args.update_per_step,
|
||||
logger=logger,
|
||||
test_in_train=False,
|
||||
reward_metric=reward_metric
|
||||
)
|
||||
|
||||
agent = policy.policies[args.agent_id - 1]
|
||||
# let's watch the match!
|
||||
return result, policy.policies[agents[args.agent_id - 1]]
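# (added note) the trainer returns a summary dict (e.g. the best reward reached),
# alongside the trained DQN agent extracted from the policy manager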
|
||||
|
||||
# ======== a test function that tests a pre-trained agent ======
|
||||
def watch(
|
||||
args: argparse.Namespace = get_args(),
|
||||
agent_learn: Optional[BasePolicy] = None,
|
||||
agent_opponent: Optional[BasePolicy] = None,
|
||||
) -> None:
|
||||
env = get_env()
|
||||
policy, optim, agents = get_agents(
|
||||
args, agent_learn=agent_learn, agent_opponent=agent_opponent
|
||||
)
|
||||
policy.eval()
|
||||
policy.policies[agents[args.agent_id - 1]].set_eps(args.eps_test)
|
||||
collector = Collector(policy, env, exploration_noise=True)
|
||||
result = collector.collect(n_episode=1, render=args.render)
|
||||
rews, lens = result["rews"], result["lens"]
|
||||
print(f"Final reward: {rews[:, args.agent_id - 1].mean()}, length: {lens.mean()}")
|
||||
|
||||
# train the agent and watch its performance in a match!
|
||||
args = get_args()
|
||||
result, agent = train_agent(args)
|
||||
watch(args, agent)
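If you also want to keep the final agent around (in addition to the best checkpoint that ``save_best_fn`` already writes), a minimal sketch using standard PyTorch serialization could look like the following; the file name is made up for illustration:

::

    # not part of the original script; choose any path you like
    save_path = os.path.join(args.logdir, 'tic_tac_toe', 'dqn', 'final_policy.pth')
    torch.save(agent.state_dict(), save_path)
    # it can later be restored through --resume-path, which calls load_state_dict()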
|
||||
|
||||
That's it. When you execute the code, you will see a progress bar indicating the training progress. After less than a minute, the agent finishes training, and you can watch how it plays against the random agent. Here is an example:
|
||||
@ -375,97 +452,79 @@ That's it. By executing the code, you will see a progress bar indicating the pro
|
||||
|
||||
::
|
||||
|
||||
board (step 1):
|
||||
=================
|
||||
===_ _ _ X _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
=================
|
||||
board (step 2):
|
||||
=================
|
||||
===_ _ _ x _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ O _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
=================
|
||||
board (step 3):
|
||||
=================
|
||||
===_ _ _ x _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ o _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ X _ _===
|
||||
===_ _ _ _ _ _===
|
||||
=================
|
||||
board (step 4):
|
||||
=================
|
||||
===_ _ _ x _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ o _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ O _ _ _===
|
||||
=================
|
||||
board (step 5):
|
||||
=================
|
||||
===_ _ _ x _ _===
|
||||
===_ _ _ _ X _===
|
||||
===_ _ o _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o _ _ _===
|
||||
=================
|
||||
board (step 6):
|
||||
=================
|
||||
===_ _ _ x _ _===
|
||||
===_ _ _ _ x _===
|
||||
===_ _ o _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ O x _ _===
|
||||
===_ _ o _ _ _===
|
||||
=================
|
||||
board (step 7):
|
||||
=================
|
||||
===_ _ _ x _ X===
|
||||
===_ _ _ _ x _===
|
||||
===_ _ o _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ o x _ _===
|
||||
===_ _ o _ _ _===
|
||||
=================
|
||||
board (step 8):
|
||||
=================
|
||||
===_ _ _ x _ x===
|
||||
===_ _ _ _ x _===
|
||||
===_ _ o _ _ _===
|
||||
===_ _ _ _ O _===
|
||||
===_ _ o x _ _===
|
||||
===_ _ o _ _ _===
|
||||
=================
|
||||
board (step 9):
|
||||
=================
|
||||
===_ _ _ x _ x===
|
||||
===_ _ _ _ x _===
|
||||
===_ _ o _ _ _===
|
||||
===_ _ _ _ o _===
|
||||
===X _ o x _ _===
|
||||
===_ _ o _ _ _===
|
||||
=================
|
||||
board (step 10):
|
||||
=================
|
||||
===_ _ _ x _ x===
|
||||
===_ _ _ _ x _===
|
||||
===_ _ o _ _ _===
|
||||
===_ _ O _ o _===
|
||||
===x _ o x _ _===
|
||||
===_ _ o _ _ _===
|
||||
=================
|
||||
Final reward: 1.0, length: 10.0
|
||||
| |
|
||||
- | - | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
| |
|
||||
| |
|
||||
- | - | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
| |
|
||||
| |
|
||||
- | - | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
| |
|
||||
| |
|
||||
- | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
| |
|
||||
| |
|
||||
- | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
| |
|
||||
| |
|
||||
O | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
| |
|
||||
| |
|
||||
O | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
| |
|
||||
| |
|
||||
O | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | O | X
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | O
|
||||
| |
|
||||
Final reward: 1.0, length: 8.0
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@ -473,7 +532,7 @@ That's it. By executing the code, you will see a progress bar indicating the pro
|
||||
|
||||
Notice that our learned agent plays the role of agent 2, placing ``o`` on the board. The agent performs quite well against the random opponent! It learns the rules of the game by trial and error, discovering that three consecutive ``o`` s mean winning, and then achieves exactly that!
|
||||
|
||||
The above code can be executed in a python shell or can be saved as a script file (we have saved it in ``test/multiagent/test_tic_tac_toe.py``). In the latter case, you can train an agent by
|
||||
The above code can be executed in a Python shell or saved as a script file (we have saved it in ``test/pettingzoo/test_tic_tac_toe.py``). In the latter case, you can train an agent by
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -494,168 +553,79 @@ Here is our output:
|
||||
|
||||
::
|
||||
|
||||
board (step 1):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ X _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
=================
|
||||
board (step 2):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ O _ _ _===
|
||||
=================
|
||||
board (step 3):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ X _ _ _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ o _ _ _===
|
||||
=================
|
||||
board (step 4):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ o O _ _===
|
||||
=================
|
||||
board (step 5):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ X _ _===
|
||||
===_ _ o o _ _===
|
||||
=================
|
||||
board (step 6):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o O _===
|
||||
=================
|
||||
board (step 7):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ x _ X _===
|
||||
===_ _ x _ _ _===
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 8):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ x _ x _===
|
||||
===_ _ x _ _ _===
|
||||
===O _ _ _ _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 9):
|
||||
=================
|
||||
===_ _ _ _ _ _===
|
||||
===_ _ x _ x _===
|
||||
===_ _ x _ _ _===
|
||||
===o _ _ X _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 10):
|
||||
=================
|
||||
===_ O _ _ _ _===
|
||||
===_ _ x _ x _===
|
||||
===_ _ x _ _ _===
|
||||
===o _ _ x _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 11):
|
||||
=================
|
||||
===_ o _ _ _ _===
|
||||
===_ _ x _ x _===
|
||||
===_ _ x _ _ X===
|
||||
===o _ _ x _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 12):
|
||||
=================
|
||||
===_ o O _ _ _===
|
||||
===_ _ x _ x _===
|
||||
===_ _ x _ _ x===
|
||||
===o _ _ x _ _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 13):
|
||||
=================
|
||||
===_ o o _ _ _===
|
||||
===_ _ x _ x _===
|
||||
===_ _ x _ _ x===
|
||||
===o _ _ x X _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 14):
|
||||
=================
|
||||
===O o o _ _ _===
|
||||
===_ _ x _ x _===
|
||||
===_ _ x _ _ x===
|
||||
===o _ _ x x _===
|
||||
===_ _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 15):
|
||||
=================
|
||||
===o o o _ _ _===
|
||||
===_ _ x _ x _===
|
||||
===_ _ x _ _ x===
|
||||
===o _ _ x x _===
|
||||
===X _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 16):
|
||||
=================
|
||||
===o o o _ _ _===
|
||||
===_ O x _ x _===
|
||||
===_ _ x _ _ x===
|
||||
===o _ _ x x _===
|
||||
===x _ _ x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 17):
|
||||
=================
|
||||
===o o o _ _ _===
|
||||
===_ o x _ x _===
|
||||
===_ _ x _ _ x===
|
||||
===o _ _ x x _===
|
||||
===x _ X x _ _===
|
||||
===_ _ o o o _===
|
||||
=================
|
||||
board (step 18):
|
||||
=================
|
||||
===o o o _ _ _===
|
||||
===_ o x _ x _===
|
||||
===_ _ x _ _ x===
|
||||
===o _ _ x x _===
|
||||
===x _ x x _ _===
|
||||
===_ O o o o _===
|
||||
=================
|
||||
| |
|
||||
- | - | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
| |
|
||||
| |
|
||||
- | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
| |
|
||||
| |
|
||||
X | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | -
|
||||
| |
|
||||
| |
|
||||
X | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | - | O
|
||||
| |
|
||||
| |
|
||||
X | O | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | O
|
||||
| |
|
||||
| |
|
||||
X | O | O
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | O
|
||||
| |
|
||||
| |
|
||||
X | O | O
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | -
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | X | O
|
||||
| |
|
||||
| |
|
||||
X | O | O
|
||||
_____|_____|_____
|
||||
| |
|
||||
- | X | O
|
||||
_____|_____|_____
|
||||
| |
|
||||
X | X | O
|
||||
| |
|
||||
Final reward: 1.0, length: 8.0
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
setup.py
@ -46,7 +46,7 @@ def get_extras_require() -> str:
|
||||
"doc8",
|
||||
"scipy",
|
||||
"pillow",
|
||||
"pettingzoo>=1.12",
|
||||
"pettingzoo>=1.17",
|
||||
"pygame>=2.1.0", # pettingzoo test cases pistonball
|
||||
"pymunk>=6.2.1", # pettingzoo test cases pistonball
|
||||
"nni>=2.3",
|
||||
|
@ -176,7 +176,7 @@ def train_agent(
|
||||
def watch(
|
||||
args: argparse.Namespace = get_args(), policy: Optional[BasePolicy] = None
|
||||
) -> None:
|
||||
env = get_env()
|
||||
env = DummyVectorEnv([get_env])
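# (added note) wrapping in DummyVectorEnv gives the collector the vectorized
# environment interface it expects, mirroring the tutorial's vectorization step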
|
||||
policy.eval()
|
||||
[agent.set_eps(args.eps_test) for agent in policy.policies.values()]
|
||||
collector = Collector(policy, env, exploration_noise=True)
|
||||
|
@ -268,7 +268,7 @@ def train_agent(
|
||||
def watch(
|
||||
args: argparse.Namespace = get_args(), policy: Optional[BasePolicy] = None
|
||||
) -> None:
|
||||
env = get_env()
|
||||
env = DummyVectorEnv([get_env])
|
||||
policy.eval()
|
||||
collector = Collector(policy, env)
|
||||
result = collector.collect(n_episode=1, render=args.render)
|
||||
|
@ -229,7 +229,7 @@ def watch(
|
||||
agent_learn: Optional[BasePolicy] = None,
|
||||
agent_opponent: Optional[BasePolicy] = None,
|
||||
) -> None:
|
||||
env = get_env()
|
||||
env = DummyVectorEnv([get_env])
|
||||
policy, optim, agents = get_agents(
|
||||
args, agent_learn=agent_learn, agent_opponent=agent_opponent
|
||||
)
|
||||
|
tianshou/env/pettingzoo_env.py
@ -32,17 +32,14 @@ class PettingZooEnv(AECEnv, ABC):
|
||||
self.agent_idx = {}
|
||||
for i, agent_id in enumerate(self.agents):
|
||||
self.agent_idx[agent_id] = i
|
||||
# Get dictionaries of obs_spaces and act_spaces
|
||||
self.observation_spaces = self.env.observation_spaces
|
||||
self.action_spaces = self.env.action_spaces
|
||||
|
||||
self.rewards = [0] * len(self.agents)
|
||||
|
||||
# Get first observation space, assuming all agents have equal space
|
||||
self.observation_space: Any = self.observation_space(self.agents[0])
|
||||
self.observation_space: Any = self.env.observation_space(self.agents[0])
|
||||
|
||||
# Get first action space, assuming all agents have equal space
|
||||
self.action_space: Any = self.action_space(self.agents[0])
|
||||
self.action_space: Any = self.env.action_space(self.agents[0])
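# Editorial note (not part of the original diff): PettingZoo >= 1.17 exposes the
# per-agent spaces as methods, env.observation_space(agent) / env.action_space(agent),
# while the old observation_spaces / action_spaces dict attributes are deprecated;
# querying the wrapped self.env directly is what sidesteps the deprecation warning
# mentioned in the commit message.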
|
||||
|
||||
assert all(self.env.observation_space(agent) == self.observation_space
|
||||
for agent in self.agents), \
|
||||
|