from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Generic, Literal, Self, TypeVar, cast

import gymnasium as gym
import numpy as np
import torch

from tianshou.data import Batch, ReplayBuffer, to_numpy, to_torch_as
from tianshou.data.batch import BatchProtocol
from tianshou.data.types import (
    ActBatchProtocol,
    BatchWithReturnsProtocol,
    ModelOutputBatchProtocol,
    ObsBatchProtocol,
    RolloutBatchProtocol,
)
from tianshou.policy import BasePolicy
from tianshou.policy.base import TLearningRateScheduler, TrainingStats


@dataclass(kw_only=True)
class DQNTrainingStats(TrainingStats):
    loss: float


TDQNTrainingStats = TypeVar("TDQNTrainingStats", bound=DQNTrainingStats)


class DQNPolicy(BasePolicy[TDQNTrainingStats], Generic[TDQNTrainingStats]):
    """Implementation of Deep Q Network. arXiv:1312.5602.

    Implementation of Double Q-Learning. arXiv:1509.06461.

    Implementation of Dueling DQN. arXiv:1511.06581 (the dueling DQN is
    implemented in the network side, not here).

    :param model: a model following the rules in
        :class:`~tianshou.policy.BasePolicy`. (s -> logits)
    :param optim: a torch.optim for optimizing the model.
    :param discount_factor: in [0, 1].
    :param estimation_step: the number of steps to look ahead.
    :param target_update_freq: the target network update frequency (0 if
        you do not use the target network).
    :param reward_normalization: normalize the **returns** to Normal(0, 1).
        TODO: rename to return_normalization?
    :param is_double: use double dqn.
    :param clip_loss_grad: clip the gradient of the loss in accordance
        with nature14236; this amounts to using the Huber loss instead of
        the MSE loss.
    :param observation_space: Env's observation space.
    :param lr_scheduler: if not None, will be called in `policy.update()`.

    .. seealso::

        Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
        explanation.
    """

    def __init__(
        self,
        *,
        model: torch.nn.Module,
        optim: torch.optim.Optimizer,
        # TODO: type violates Liskov substitution principle
        action_space: gym.spaces.Discrete,
        discount_factor: float = 0.99,
        estimation_step: int = 1,
        target_update_freq: int = 0,
        reward_normalization: bool = False,
        is_double: bool = True,
        clip_loss_grad: bool = False,
        observation_space: gym.Space | None = None,
        lr_scheduler: TLearningRateScheduler | None = None,
    ) -> None:
        super().__init__(
            action_space=action_space,
            observation_space=observation_space,
            action_scaling=False,
            action_bound_method=None,
            lr_scheduler=lr_scheduler,
        )
        self.model = model
        self.optim = optim
        self.eps = 0.0
        assert (
            0.0 <= discount_factor <= 1.0
        ), f"discount factor should be in [0, 1] but got: {discount_factor}"
        self.gamma = discount_factor
        assert (
            estimation_step > 0
        ), f"estimation_step should be greater than 0 but got: {estimation_step}"
        self.n_step = estimation_step
        self._target = target_update_freq > 0
        self.freq = target_update_freq
        self._iter = 0
        if self._target:
            self.model_old = deepcopy(self.model)
            self.model_old.eval()
        self.rew_norm = reward_normalization
        self.is_double = is_double
        self.clip_loss_grad = clip_loss_grad

        # TODO: set in forward, fix this!
        self.max_action_num: int | None = None

    def set_eps(self, eps: float) -> None:
        """Set the eps for epsilon-greedy exploration."""
        self.eps = eps
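
    # Typical use (illustrative, not prescribed by this module): anneal
    # exploration during training and act greedily at evaluation time, e.g.
    # `policy.set_eps(0.1)` inside a trainer's `train_fn` and
    # `policy.set_eps(0.0)` before collecting test episodes.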

    def train(self, mode: bool = True) -> Self:
        """Set the module in training mode, except for the target network."""
        self.training = mode
        self.model.train(mode)
        return self

    def sync_weight(self) -> None:
        """Synchronize the weight for the target network."""
        self.model_old.load_state_dict(self.model.state_dict())

    def _target_q(self, buffer: ReplayBuffer, indices: np.ndarray) -> torch.Tensor:
        obs_next_batch = Batch(
            obs=buffer[indices].obs_next,
            info=[None] * len(indices),
        )  # obs_next: s_{t+n}
        result = self(obs_next_batch)
        if self._target:
            # target_Q = Q_old(s_, argmax(Q_new(s_, *)))
            target_q = self(obs_next_batch, model="model_old").logits
        else:
            target_q = result.logits
        if self.is_double:
            return target_q[np.arange(len(result.act)), result.act]
        # Nature DQN: plain max over actions, which tends to overestimate Q-values
        return target_q.max(dim=1)[0]
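
    # Shape sketch for the double-DQN branch above (illustrative numbers):
    # with a batch of 3 transitions and 4 actions, `target_q` has shape
    # (3, 4) and `result.act` might be [2, 0, 1]; the fancy index
    # `target_q[np.arange(3), [2, 0, 1]]` then picks, per sample, the
    # old network's Q-value at the action the new network prefers.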

    def process_fn(
        self,
        batch: RolloutBatchProtocol,
        buffer: ReplayBuffer,
        indices: np.ndarray,
    ) -> BatchWithReturnsProtocol:
        """Compute the n-step return for Q-learning targets.

        More details can be found at
        :meth:`~tianshou.policy.BasePolicy.compute_nstep_return`.
        """
        return self.compute_nstep_return(
            batch=batch,
            buffer=buffer,
            indices=indices,
            target_q_fn=self._target_q,
            gamma=self.gamma,
            n_step=self.n_step,
            rew_norm=self.rew_norm,
        )
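
    # Conceptually, the n-step target assembled by `compute_nstep_return` is
    #   G_t = r_t + gamma * r_{t+1} + ... + gamma^{n-1} * r_{t+n-1}
    #         + gamma^n * Q_target(s_{t+n}, a*),
    # where the bootstrap term comes from `_target_q` above and the sum is
    # truncated at terminal steps.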

    def compute_q_value(self, logits: torch.Tensor, mask: np.ndarray | None) -> torch.Tensor:
        """Compute the q value based on the network's raw output and action mask."""
        if mask is not None:
            # the masked q value should be smaller than logits.min()
            min_value = logits.min() - logits.max() - 1.0
            logits = logits + to_torch_as(1 - mask, logits) * min_value
        return logits
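
    # Numeric sketch of the masking above (illustrative values): for
    # logits = [1.0, 3.0, 2.0] and mask = [False, True, False],
    # min_value = 1.0 - 3.0 - 1.0 = -3.0, so the masked entries become
    # [-2.0, 3.0, -1.0]; every masked value now sits below the original
    # minimum, and argmax can only select the legal action 1.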

    def forward(
        self,
        batch: ObsBatchProtocol,
        state: dict | BatchProtocol | np.ndarray | None = None,
        model: Literal["model", "model_old"] = "model",
        **kwargs: Any,
    ) -> ModelOutputBatchProtocol:
        """Compute action over the given batch data.

        If you need to mask the action, please add a "mask" into batch.obs.
        For example, for an environment with three actions (0, 1, 2):
        ::

            batch == Batch(
                obs=Batch(
                    obs="original obs, with batch_size=1 for demonstration",
                    mask=np.array([[False, True, False]]),
                    # action 1 is available
                    # action 0 and 2 are unavailable
                ),
                ...
            )

        :return: A :class:`~tianshou.data.Batch` which has 3 keys:

            * ``act`` the action.
            * ``logits`` the network's raw output.
            * ``state`` the hidden state.

        .. seealso::

            Please refer to :meth:`~tianshou.policy.BasePolicy.forward` for
            more detailed explanation.
        """
        model = getattr(self, model)
        obs = batch.obs
        # TODO: this is convoluted! See also other places where this is done.
        obs_next = obs.obs if hasattr(obs, "obs") else obs
        logits, hidden = model(obs_next, state=state, info=batch.info)
        q = self.compute_q_value(logits, getattr(obs, "mask", None))
        if self.max_action_num is None:
            self.max_action_num = q.shape[1]
        act = to_numpy(q.max(dim=1)[1])
        result = Batch(logits=logits, act=act, state=hidden)
        return cast(ModelOutputBatchProtocol, result)

    def learn(self, batch: RolloutBatchProtocol, *args: Any, **kwargs: Any) -> TDQNTrainingStats:
        if self._target and self._iter % self.freq == 0:
            self.sync_weight()
        self.optim.zero_grad()
        weight = batch.pop("weight", 1.0)
        q = self(batch).logits
        q = q[np.arange(len(q)), batch.act]
        returns = to_torch_as(batch.returns.flatten(), q)
        td_error = returns - q

        if self.clip_loss_grad:
            y = q.reshape(-1, 1)
            t = returns.reshape(-1, 1)
            loss = torch.nn.functional.huber_loss(y, t, reduction="mean")
        else:
            loss = (td_error.pow(2) * weight).mean()

        batch.weight = td_error  # prio-buffer
        loss.backward()
        self.optim.step()
        self._iter += 1

        return DQNTrainingStats(loss=loss.item())  # type: ignore[return-value]
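
    # Loss sketch for the default (MSE) branch above: with a prioritized
    # replay buffer, the sampled batch carries importance weights w_i and
    # the update minimizes
    #   L = mean_i( w_i * (G_i - Q(s_i, a_i))^2 ),
    # while the raw TD errors G_i - Q(s_i, a_i) are written back via
    # `batch.weight` to serve as the transitions' new priorities.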

    _TArrOrActBatch = TypeVar("_TArrOrActBatch", bound="np.ndarray | ActBatchProtocol")

    def exploration_noise(
        self,
        act: _TArrOrActBatch,
        batch: ObsBatchProtocol,
    ) -> _TArrOrActBatch:
        if isinstance(act, np.ndarray) and not np.isclose(self.eps, 0.0):
            bsz = len(act)
            rand_mask = np.random.rand(bsz) < self.eps
            assert (
                self.max_action_num is not None
            ), "Can't call this method before max_action_num was set in first forward"
            q = np.random.rand(bsz, self.max_action_num)  # [0, 1]
            if hasattr(batch.obs, "mask"):
                q += batch.obs.mask
            rand_act = q.argmax(axis=1)
            act[rand_mask] = rand_act[rand_mask]
        return act
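

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the library: the toy Q-network,
    # the CartPole environment, and the hyperparameters below are illustrative
    # assumptions chosen only to exercise `forward` on a single observation.
    class _ToyQNet(torch.nn.Module):
        def __init__(self, state_dim: int, action_dim: int) -> None:
            super().__init__()
            self.net = torch.nn.Sequential(
                torch.nn.Linear(state_dim, 64),
                torch.nn.ReLU(),
                torch.nn.Linear(64, action_dim),
            )

        def forward(self, obs: Any, state: Any = None, info: Any = None) -> tuple[torch.Tensor, Any]:
            # DQNPolicy.forward passes `state` and `info` through to the model.
            return self.net(torch.as_tensor(obs, dtype=torch.float32)), state

    env = gym.make("CartPole-v1")
    net = _ToyQNet(state_dim=4, action_dim=2)
    policy = DQNPolicy(
        model=net,
        optim=torch.optim.Adam(net.parameters(), lr=1e-3),
        action_space=env.action_space,  # Discrete(2)
        target_update_freq=100,
    )
    obs, _ = env.reset()
    # Greedy action for a single observation (batch dimension added via obs[None]).
    print(policy(Batch(obs=obs[None], info=[{}])).act)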