Closes #914

Additional changes:

- Deprecate Python below 3.11
- Remove 3rd-party and throughput tests. This simplifies the install and test pipeline
- Remove gym compatibility and shimmy
- Format with 3.11 conventions. In particular, add `zip(..., strict=True/False)` where possible (see the sketch below)

Since the additional tests and gym were complicating the CI pipeline (flaky and dist-dependent), it didn't make sense to fix the current tests in this PR only to delete them in the next one. So this PR changes the build and removes these tests at the same time.
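A minimal sketch of the `strict` flag (illustration, not code from this diff): with `strict=True`, `zip` raises on a length mismatch instead of silently truncating.

```python
list(zip([1, 2, 3], ["a", "b"]))                # [(1, 'a'), (2, 'b')]: silent truncation
list(zip([1, 2, 3], ["a", "b"], strict=False))  # same result, but the truncation is explicit
list(zip([1, 2, 3], ["a", "b"], strict=True))   # raises ValueError
```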
import warnings
from typing import Any

import numpy as np
import torch
import torch.nn.functional as F

from tianshou.data import ReplayBuffer
from tianshou.data.types import RolloutBatchProtocol
from tianshou.policy import DQNPolicy

class QRDQNPolicy(DQNPolicy):
    """Implementation of Quantile Regression Deep Q-Network. arXiv:1710.10044.

    :param torch.nn.Module model: a model following the rules in
        :class:`~tianshou.policy.BasePolicy`. (s -> logits)
    :param torch.optim.Optimizer optim: a torch.optim for optimizing the model.
    :param float discount_factor: in [0, 1].
    :param int num_quantiles: the number of quantile midpoints in the inverse
        cumulative distribution function of the value. Default to 200.
    :param int estimation_step: the number of steps to look ahead. Default to 1.
    :param int target_update_freq: the target network update frequency (0 if
        you do not use the target network).
    :param bool reward_normalization: normalize the reward to Normal(0, 1).
        Default to False.
    :param lr_scheduler: a learning rate scheduler that adjusts the learning rate
        in optimizer in each policy.update(). Default to None (no lr_scheduler).

    .. seealso::

        Please refer to :class:`~tianshou.policy.DQNPolicy` for more detailed
        explanation.
    """

    def __init__(
        self,
        model: torch.nn.Module,
        optim: torch.optim.Optimizer,
        discount_factor: float = 0.99,
        num_quantiles: int = 200,
        estimation_step: int = 1,
        target_update_freq: int = 0,
        reward_normalization: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            model,
            optim,
            discount_factor,
            estimation_step,
            target_update_freq,
            reward_normalization,
            **kwargs,
        )
        assert num_quantiles > 1, "num_quantiles should be greater than 1"
        self._num_quantiles = num_quantiles
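        # For illustration: with num_quantiles = 4, tau below is
        # [0.00, 0.25, 0.50, 0.75, 1.00], so the midpoints tau_hat are
        # [0.125, 0.375, 0.625, 0.875]. The (1, N, 1) view lets tau_hat
        # broadcast over the (batch, N, N) pairwise TD errors in learn().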
        tau = torch.linspace(0, 1, self._num_quantiles + 1)
        self.tau_hat = torch.nn.Parameter(
            ((tau[:-1] + tau[1:]) / 2).view(1, -1, 1),
            requires_grad=False,
        )
        warnings.filterwarnings("ignore", message="Using a target size")
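
    # Unlike scalar DQN, the n-step target here is the full quantile
    # distribution of the greedily selected next action: the action comes from
    # forward() (which ranks actions by mean Q-value), and with a target
    # network the quantiles are evaluated by the frozen "model_old" copy.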
    def _target_q(self, buffer: ReplayBuffer, indices: np.ndarray) -> torch.Tensor:
        batch = buffer[indices]  # batch.obs_next: s_{t+n}
        if self._target:
            act = self(batch, input="obs_next").act
            next_dist = self(batch, model="model_old", input="obs_next").logits
        else:
            next_batch = self(batch, input="obs_next")
            act = next_batch.act
            next_dist = next_batch.logits
        return next_dist[np.arange(len(act)), act, :]
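
    # Scalar Q-values for action selection are the means of the predicted
    # quantiles, i.e. the expectation of the learned return distribution.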
    def compute_q_value(self, logits: torch.Tensor, mask: np.ndarray | None) -> torch.Tensor:
        return super().compute_q_value(logits.mean(2), mask)

    def learn(self, batch: RolloutBatchProtocol, *args: Any, **kwargs: Any) -> dict[str, float]:
        if self._target and self._iter % self._freq == 0:
            self.sync_weight()
        self.optim.zero_grad()
        weight = batch.pop("weight", 1.0)
        curr_dist = self(batch).logits
        act = batch.act
        curr_dist = curr_dist[np.arange(len(act)), act, :].unsqueeze(2)
        target_dist = batch.returns.unsqueeze(1)
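        # Shapes: curr_dist is (batch, N, 1) and target_dist is (batch, 1, N),
        # so the element-wise loss below broadcasts to (batch, N, N), pairing
        # every predicted quantile with every target quantile.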
        # calculate each element's difference between curr_dist and target_dist
        dist_diff = F.smooth_l1_loss(target_dist, curr_dist, reduction="none")
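        # Quantile regression: each Huber term is weighted by
        # |tau_hat - 1{target <= curr}|, so under-estimates of high quantiles
        # (and over-estimates of low ones) are penalized asymmetrically.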
        huber_loss = (
            (dist_diff * (self.tau_hat - (target_dist - curr_dist).detach().le(0.0).float()).abs())
            .sum(-1)
            .mean(1)
        )
        loss = (huber_loss * weight).mean()
        # ref: https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/
        # blob/master/fqf_iqn_qrdqn/agent/qrdqn_agent.py L130
        batch.weight = dist_diff.detach().abs().sum(-1).mean(1)  # prio-buffer
        loss.backward()
        self.optim.step()
        self._iter += 1
        return {"loss": loss.item()}
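For context, a minimal usage sketch. This is an assumption-laden illustration, not part of the diff: `Net` and its `num_atoms` argument follow tianshou's conventions used elsewhere in the repo (e.g. the QRDQN tests), and the hyperparameters are placeholders.

```python
import torch
from tianshou.policy import QRDQNPolicy
from tianshou.utils.net.common import Net  # assumed helper from tianshou

# num_atoms > 1 makes the network emit (batch, actions, num_quantiles) logits.
net = Net(state_shape=4, action_shape=2, hidden_sizes=[128, 128], num_atoms=200)
optim = torch.optim.Adam(net.parameters(), lr=1e-3)
policy = QRDQNPolicy(
    net,
    optim,
    discount_factor=0.99,
    num_quantiles=200,
    estimation_step=3,
    target_update_freq=320,
)
```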