This PR adds strict typing to the output of `update` and `learn` in all policies. This will likely be the last large refactoring PR before the next release (0.6.0, not 1.0.0), so it requires some attention. Several difficulties were encountered on the path to that goal:

1. The policy hierarchy is actually "broken" in the sense that the keys of the dicts returned by `learn` did not follow the same extension (inheritance) pattern as the policies themselves. This is a real problem and should be addressed in the near future. More generally, several aspects of the policy design and hierarchy might deserve a dedicated discussion.
2. Each policy needs to be generic in the type of stats it returns, because a policy may be extended at some point, and then its stats need to be extendable as well. This pattern is already required in many places within the code base itself.
3. The interaction between `learn` and `update` is a bit quirky: we currently handle it by having `update` modify special fields inside `TrainingStats`, while all other fields are handled by `learn`.
4. The IQM module is a policy wrapper and required a `TrainingStatsWrapper`. The latter relies on a bunch of black magic.

They were addressed by:

1. Living with the broken hierarchy, which is now made visible by bounds in generics. We use `type: ignore` where appropriate.
2. Making all policies generic, with bounds following the policy inheritance hierarchy (which is incorrect, see above); see also the illustrative sketch after this description. We experimented a bit with nested `TrainingStats` classes, but that seemed to add more complexity and be harder to understand. Unfortunately, mypy considers the code below wrong, which is why we have to add `type: ignore` to the return of each `learn`:

   ```python
   T = TypeVar("T", bound=int)


   def f() -> T:
       return 3
   ```

3. See above.
4. Writing representative tests for the `TrainingStatsWrapper`. Still, the black magic might cause nasty surprises down the line (I am not proud of it)...

Closes #933

---------

Co-authored-by: Maximilian Huettenrauch <m.huettenrauch@appliedai.de>
Co-authored-by: Michael Panchenko <m.panchenko@appliedai.de>
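For illustration, here is a minimal sketch of the resulting generics pattern, mirroring the DQN code below. The `MyDQNTrainingStats`/`MyDQNPolicy` names and the extra field are hypothetical; only the imported tianshou classes come from the code in this PR:

```python
from dataclasses import dataclass
from typing import Any, Generic, TypeVar

from tianshou.data.types import RolloutBatchProtocol
from tianshou.policy import DQNPolicy
from tianshou.policy.modelfree.dqn import DQNTrainingStats


@dataclass(kw_only=True)
class MyDQNTrainingStats(DQNTrainingStats):
    extra_metric: float  # hypothetical field added by the extension


TMyDQNTrainingStats = TypeVar("TMyDQNTrainingStats", bound=MyDQNTrainingStats)


class MyDQNPolicy(DQNPolicy[TMyDQNTrainingStats], Generic[TMyDQNTrainingStats]):
    def learn(
        self, batch: RolloutBatchProtocol, *args: Any, **kwargs: Any
    ) -> TMyDQNTrainingStats:
        stats = super().learn(batch, *args, **kwargs)
        # as with the minimal bound-TypeVar example above, mypy rejects
        # returning the concrete stats class here, hence the ignore
        return MyDQNTrainingStats(loss=stats.loss, extra_metric=0.0)  # type: ignore[return-value]
```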
```python
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Generic, Literal, Self, TypeVar, cast

import gymnasium as gym
import numpy as np
import torch

from tianshou.data import Batch, ReplayBuffer, to_numpy, to_torch_as
from tianshou.data.batch import BatchProtocol
from tianshou.data.types import (
    BatchWithReturnsProtocol,
    ModelOutputBatchProtocol,
    ObsBatchProtocol,
    RolloutBatchProtocol,
)
from tianshou.policy import BasePolicy
from tianshou.policy.base import TLearningRateScheduler, TrainingStats


@dataclass(kw_only=True)
class DQNTrainingStats(TrainingStats):
    loss: float


TDQNTrainingStats = TypeVar("TDQNTrainingStats", bound=DQNTrainingStats)
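# Subclasses of DQNPolicy can bind this TypeVar to their own extension of
# DQNTrainingStats, keeping learn() precisely typed down the class hierarchy.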


class DQNPolicy(BasePolicy[TDQNTrainingStats], Generic[TDQNTrainingStats]):
    """Implementation of Deep Q Network. arXiv:1312.5602.

    Implementation of Double Q-Learning. arXiv:1509.06461.

    Implementation of Dueling DQN. arXiv:1511.06581 (the dueling DQN is
    implemented in the network side, not here).

    :param model: a model following the rules in
        :class:`~tianshou.policy.BasePolicy`. (s -> logits)
    :param optim: a torch.optim for optimizing the model.
    :param discount_factor: in [0, 1].
    :param estimation_step: the number of steps to look ahead.
    :param target_update_freq: the target network update frequency (0 if
        you do not use the target network).
    :param reward_normalization: normalize the **returns** to Normal(0, 1).
        TODO: rename to return_normalization?
    :param is_double: use double dqn.
    :param clip_loss_grad: clip the gradient of the loss in accordance
        with nature14236; this amounts to using the Huber loss instead of
        the MSE loss.
    :param observation_space: Env's observation space.
    :param lr_scheduler: if not None, will be called in `policy.update()`.

    .. seealso::

        Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
        explanation.
    """

    def __init__(
        self,
        *,
        model: torch.nn.Module,
        optim: torch.optim.Optimizer,
        # TODO: type violates Liskov substitution principle
        action_space: gym.spaces.Discrete,
        discount_factor: float = 0.99,
        estimation_step: int = 1,
        target_update_freq: int = 0,
        reward_normalization: bool = False,
        is_double: bool = True,
        clip_loss_grad: bool = False,
        observation_space: gym.Space | None = None,
        lr_scheduler: TLearningRateScheduler | None = None,
    ) -> None:
        super().__init__(
            action_space=action_space,
            observation_space=observation_space,
            action_scaling=False,
            action_bound_method=None,
            lr_scheduler=lr_scheduler,
        )
        self.model = model
        self.optim = optim
        self.eps = 0.0
        assert (
            0.0 <= discount_factor <= 1.0
        ), f"discount factor should be in [0, 1] but got: {discount_factor}"
        self.gamma = discount_factor
        assert (
            estimation_step > 0
        ), f"estimation_step should be greater than 0 but got: {estimation_step}"
        self.n_step = estimation_step
        self._target = target_update_freq > 0
        self.freq = target_update_freq
        self._iter = 0
        if self._target:
            self.model_old = deepcopy(self.model)
            self.model_old.eval()
        self.rew_norm = reward_normalization
        self.is_double = is_double
        self.clip_loss_grad = clip_loss_grad

        # TODO: set in forward, fix this!
        self.max_action_num: int | None = None

    def set_eps(self, eps: float) -> None:
        """Set the eps for epsilon-greedy exploration."""
        self.eps = eps

    def train(self, mode: bool = True) -> Self:
        """Set the module in training mode, except for the target network."""
        self.training = mode
        self.model.train(mode)
        return self

    def sync_weight(self) -> None:
        """Synchronize the weight for the target network."""
        self.model_old.load_state_dict(self.model.state_dict())

    def _target_q(self, buffer: ReplayBuffer, indices: np.ndarray) -> torch.Tensor:
        obs_next_batch = Batch(
            obs=buffer[indices].obs_next,
            info=[None] * len(indices),
        )  # obs_next: s_{t+n}
        result = self(obs_next_batch)
        if self._target:
            # target_Q = Q_old(s_, argmax(Q_new(s_, *)))
            target_q = self(obs_next_batch, model="model_old").logits
        else:
            target_q = result.logits
        if self.is_double:
            return target_q[np.arange(len(result.act)), result.act]
        # Nature DQN, over estimate
        return target_q.max(dim=1)[0]

    def process_fn(
        self,
        batch: RolloutBatchProtocol,
        buffer: ReplayBuffer,
        indices: np.ndarray,
    ) -> BatchWithReturnsProtocol:
        """Compute the n-step return for Q-learning targets.

        More details can be found at
        :meth:`~tianshou.policy.BasePolicy.compute_nstep_return`.
        """
        return self.compute_nstep_return(
            batch=batch,
            buffer=buffer,
            indices=indices,
            target_q_fn=self._target_q,
            gamma=self.gamma,
            n_step=self.n_step,
            rew_norm=self.rew_norm,
        )

    def compute_q_value(self, logits: torch.Tensor, mask: np.ndarray | None) -> torch.Tensor:
        """Compute the q value based on the network's raw output and action mask."""
        if mask is not None:
            # the masked q value should be smaller than logits.min()
            min_value = logits.min() - logits.max() - 1.0
            logits = logits + to_torch_as(1 - mask, logits) * min_value
        return logits

    def forward(
        self,
        batch: ObsBatchProtocol,
        state: dict | BatchProtocol | np.ndarray | None = None,
        model: Literal["model", "model_old"] = "model",
        **kwargs: Any,
    ) -> ModelOutputBatchProtocol:
        """Compute action over the given batch data.

        If you need to mask actions, add a "mask" entry to batch.obs. For
        example, in an environment with three actions (0/1/2):
        ::

            batch == Batch(
                obs=Batch(
                    obs="original obs, with batch_size=1 for demonstration",
                    mask=np.array([[False, True, False]]),
                    # action 1 is available
                    # action 0 and 2 are unavailable
                ),
                ...
            )

        :return: A :class:`~tianshou.data.Batch` which has 3 keys:

            * ``act`` the action.
            * ``logits`` the network's raw output.
            * ``state`` the hidden state.

        .. seealso::

            Please refer to :meth:`~tianshou.policy.BasePolicy.forward` for
            more detailed explanation.
        """
        model = getattr(self, model)
        obs = batch.obs
        # TODO: this is convoluted! See also other places where this is done.
        obs_next = obs.obs if hasattr(obs, "obs") else obs
        logits, hidden = model(obs_next, state=state, info=batch.info)
        q = self.compute_q_value(logits, getattr(obs, "mask", None))
        if self.max_action_num is None:
            self.max_action_num = q.shape[1]
        act = to_numpy(q.max(dim=1)[1])
        result = Batch(logits=logits, act=act, state=hidden)
        return cast(ModelOutputBatchProtocol, result)

    def learn(self, batch: RolloutBatchProtocol, *args: Any, **kwargs: Any) -> TDQNTrainingStats:
        if self._target and self._iter % self.freq == 0:
            self.sync_weight()
        self.optim.zero_grad()
        weight = batch.pop("weight", 1.0)
        q = self(batch).logits
        q = q[np.arange(len(q)), batch.act]
        returns = to_torch_as(batch.returns.flatten(), q)
        td_error = returns - q

        if self.clip_loss_grad:
            y = q.reshape(-1, 1)
            t = returns.reshape(-1, 1)
            loss = torch.nn.functional.huber_loss(y, t, reduction="mean")
        else:
            loss = (td_error.pow(2) * weight).mean()

        batch.weight = td_error  # prio-buffer
        loss.backward()
        self.optim.step()
        self._iter += 1

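        # NB: update() in the base class later fills in the fields shared by
        # all policies on this stats object (see the PR description above)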
        return DQNTrainingStats(loss=loss.item())  # type: ignore[return-value]

    def exploration_noise(
        self,
        act: np.ndarray | BatchProtocol,
        batch: RolloutBatchProtocol,
    ) -> np.ndarray | BatchProtocol:
        if isinstance(act, np.ndarray) and not np.isclose(self.eps, 0.0):
            bsz = len(act)
            rand_mask = np.random.rand(bsz) < self.eps
            assert (
                self.max_action_num is not None
            ), "Can't call this method before max_action_num was set in first forward"
            q = np.random.rand(bsz, self.max_action_num)  # [0, 1]
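            # the 0/1 mask raises every available action's random score above
            # the unavailable ones, so the argmax below respects the mask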
            if hasattr(batch.obs, "mask"):
                q += batch.obs.mask
            rand_act = q.argmax(axis=1)
            act[rand_mask] = rand_act[rand_mask]
        return act
```
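Finally, a quick usage sketch of the now-typed interface. The `QNet` module and all hyperparameters are made up for demonstration; only `DQNPolicy`, its constructor arguments, and `DQNTrainingStats` come from the file above:

```python
import gymnasium as gym
import torch

from tianshou.policy import DQNPolicy


class QNet(torch.nn.Module):
    """Toy Q-network following the (obs, state, info) -> (logits, state) convention."""

    def __init__(self, state_dim: int, action_dim: int) -> None:
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(state_dim, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, action_dim),
        )

    def forward(self, obs, state=None, info=None):
        logits = self.net(torch.as_tensor(obs, dtype=torch.float32))
        return logits, state


env = gym.make("CartPole-v1")
net = QNet(state_dim=4, action_dim=2)
policy = DQNPolicy(
    model=net,
    optim=torch.optim.Adam(net.parameters(), lr=1e-3),
    action_space=env.action_space,  # Discrete(2)
    target_update_freq=100,
)
policy.set_eps(0.1)
# stats returned by learn()/update() are now DQNTrainingStats instances,
# so e.g. `stats.loss` is a typed float instead of a dict lookup
```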