2021-09-03 05:05:04 +08:00
|
|
|
from abc import ABC, abstractmethod
|
2022-03-12 22:26:00 +08:00
|
|
|
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
2021-09-03 05:05:04 +08:00
|
|
|
|
2020-09-02 13:03:32 +08:00
|
|
|
import gym
|
2020-04-14 21:11:06 +08:00
|
|
|
import numpy as np
|
2021-09-03 05:05:04 +08:00
|
|
|
import torch
|
|
|
|
from gym.spaces import Box, Discrete, MultiBinary, MultiDiscrete
|
2020-09-02 13:03:32 +08:00
|
|
|
from numba import njit
|
2021-09-03 05:05:04 +08:00
|
|
|
from torch import nn
|
2020-05-12 11:31:47 +08:00
|
|
|
|
2021-09-03 05:05:04 +08:00
|
|
|
from tianshou.data import Batch, ReplayBuffer, to_numpy, to_torch_as
|
2022-04-17 08:52:30 -07:00
|
|
|
from tianshou.utils import MultipleLRSchedulers
|
2020-03-12 22:20:33 +08:00
|
|
|
|
|
|
|
|
2020-03-18 21:45:41 +08:00
|
|
|
class BasePolicy(ABC, nn.Module):
    """The base class for any RL policy.

    Tianshou aims to modularize RL algorithms. It comes into several classes of
    policies in Tianshou. All of the policy classes must inherit
    :class:`~tianshou.policy.BasePolicy`.

    A policy class typically has the following parts:

    * :meth:`~tianshou.policy.BasePolicy.__init__`: initialize the policy, including \
        copying the target network and so on;
    * :meth:`~tianshou.policy.BasePolicy.forward`: compute action with given \
        observation;
    * :meth:`~tianshou.policy.BasePolicy.process_fn`: pre-process data from the \
        replay buffer (this function can interact with replay buffer);
    * :meth:`~tianshou.policy.BasePolicy.learn`: update policy with a given batch of \
        data.
    * :meth:`~tianshou.policy.BasePolicy.post_process_fn`: update the replay buffer \
        from the learning process (e.g., prioritized replay buffer needs to update \
        the weight);
    * :meth:`~tianshou.policy.BasePolicy.update`: the main interface for training, \
        i.e., `process_fn -> learn -> post_process_fn`.

    Most of the policy needs a neural network to predict the action and an
    optimizer to optimize the policy. The rules of self-defined networks are:

    1. Input: observation "obs" (may be a ``numpy.ndarray``, a ``torch.Tensor``, a \
    dict or any others), hidden state "state" (for RNN usage), and other information \
    "info" provided by the environment.
    2. Output: some "logits", the next hidden state "state", and the intermediate \
    result during policy forwarding procedure "policy". The "logits" could be a tuple \
    instead of a ``torch.Tensor``. It depends on how the policy process the network \
    output. For example, in PPO, the return of the network might be \
    ``(mu, sigma), state`` for Gaussian policy. The "policy" can be a Batch of \
    torch.Tensor or other things, which will be stored in the replay buffer, and can \
    be accessed in the policy update process (e.g. in "policy.learn()", the \
    "batch.policy" is what you need).

    Since :class:`~tianshou.policy.BasePolicy` inherits ``torch.nn.Module``, you can
    use :class:`~tianshou.policy.BasePolicy` almost the same as ``torch.nn.Module``,
    for instance, loading and saving the model:
    ::

        torch.save(policy.state_dict(), "policy.pth")
        policy.load_state_dict(torch.load("policy.pth"))
    """
|
|
|
|
|
2020-09-12 15:39:01 +08:00
|
|
|
def __init__(
    self,
    observation_space: Optional[gym.Space] = None,
    action_space: Optional[gym.Space] = None,
    action_scaling: bool = False,
    action_bound_method: str = "",
    lr_scheduler: Optional[Union[torch.optim.lr_scheduler.LambdaLR,
                                 MultipleLRSchedulers]] = None,
) -> None:
    """Store the env spaces and the action post-processing configuration.

    :param observation_space: the env observation space. Default to None.
    :param action_space: the env action space, used to classify the action
        type and to rescale continuous actions. Default to None.
    :param action_scaling: whether :meth:`map_action` rescales actions from
        [-1, 1] to [action_space.low, action_space.high]. Default to False.
    :param action_bound_method: one of "", "clip", "tanh"; how raw network
        output is bounded before scaling. Default to "" (no bounding).
    :param lr_scheduler: a learning-rate scheduler stepped once at the end of
        each :meth:`update`. Default to None.
    """
    super().__init__()
    self.observation_space = observation_space
    self.action_space = action_space
    # classify the action space once so subclasses can branch on it cheaply
    if isinstance(action_space, (Discrete, MultiDiscrete, MultiBinary)):
        self.action_type = "discrete"
    elif isinstance(action_space, Box):
        self.action_type = "continuous"
    else:
        self.action_type = ""
    self.agent_id = 0
    self.updating = False
    self.action_scaling = action_scaling
    # can be one of ("clip", "tanh", ""), empty string means no bounding
    assert action_bound_method in ("", "clip", "tanh")
    self.action_bound_method = action_bound_method
    self.lr_scheduler = lr_scheduler
    self._compile()
|
2020-07-21 14:59:49 +08:00
|
|
|
|
|
|
|
def set_agent_id(self, agent_id: int) -> None:
    """Record which agent this policy controls (used in multi-agent RL)."""
    self.agent_id = agent_id
|
2020-03-12 22:20:33 +08:00
|
|
|
|
2021-09-03 05:05:04 +08:00
|
|
|
def exploration_noise(self, act: Union[np.ndarray, Batch],
                      batch: Batch) -> Union[np.ndarray, Batch]:
    """Modify the action from policy.forward with exploration noise.

    The base implementation is a no-op and returns ``act`` unchanged;
    subclasses override it to inject algorithm-specific exploration.

    :param act: a data batch or numpy.ndarray which is the action taken by
        policy.forward.
    :param batch: the input batch for policy.forward, kept for advanced usage.

    :return: action in the same form of input "act" but with added exploration
        noise.
    """
    return act
|
|
|
|
|
2022-01-30 00:53:56 +08:00
|
|
|
def soft_update(self, tgt: nn.Module, src: nn.Module, tau: float) -> None:
    """Polyak-average the source module's parameters into the target module.

    Each target parameter becomes ``tau * src + (1 - tau) * tgt`` in place.
    """
    for target_param, source_param in zip(tgt.parameters(), src.parameters()):
        blended = tau * source_param.data + (1 - tau) * target_param.data
        target_param.data.copy_(blended)
|
|
|
|
|
2020-03-12 22:20:33 +08:00
|
|
|
@abstractmethod
def forward(
    self,
    batch: Batch,
    state: Optional[Union[dict, Batch, np.ndarray]] = None,
    **kwargs: Any,
) -> Batch:
    """Compute action over the given batch data.

    :return: A :class:`~tianshou.data.Batch` which MUST have the following keys:

        * ``act`` an numpy.ndarray or a torch.Tensor, the action over \
            given batch data.
        * ``state`` a dict, an numpy.ndarray or a torch.Tensor, the \
            internal state of the policy, ``None`` as default.

    Other keys are user-defined. It depends on the algorithm. For example,
    ::

        # some code
        return Batch(logits=..., act=..., state=None, dist=...)

    The keyword ``policy`` is reserved and the corresponding data will be
    stored into the replay buffer. For instance,
    ::

        # some code
        return Batch(..., policy=Batch(log_prob=dist.log_prob(act)))
        # and in the sampled data batch, you can directly use
        # batch.policy.log_prob to get your data.

    .. note::

        In continuous action space, you should do another step "map_action" to get
        the real action:
        ::

            act = policy(batch).act  # doesn't map to the target action range
            act = policy.map_action(act)
    """
    pass
|
|
|
|
|
2021-03-21 16:45:50 +08:00
|
|
|
def map_action(self, act: Union[Batch, np.ndarray]) -> Union[Batch, np.ndarray]:
    """Map raw network output to action range in gym's env.action_space.

    This function is called in :meth:`~tianshou.data.Collector.collect` and only
    affects action sending to env. Remapped action will not be stored in buffer
    and thus can be viewed as a part of env (a black box action transformation).

    Action mapping includes 2 standard procedures: bounding and scaling. Bounding
    procedure expects original action range is (-inf, inf) and maps it to [-1, 1],
    while scaling procedure expects original action range is (-1, 1) and maps it
    to [action_space.low, action_space.high]. Bounding procedure is applied first.

    :param act: a data batch or numpy.ndarray which is the action taken by
        policy.forward.

    :return: action in the same form of input "act" but remap to the target action
        space.
    """
    # currently this action mapping only supports np.ndarray action in Box space
    if not (isinstance(self.action_space, gym.spaces.Box)
            and isinstance(act, np.ndarray)):
        return act
    if self.action_bound_method == "clip":
        act = np.clip(act, -1.0, 1.0)
    elif self.action_bound_method == "tanh":
        act = np.tanh(act)
    if self.action_scaling:
        assert np.min(act) >= -1.0 and np.max(act) <= 1.0, \
            "action scaling only accepts raw action range = [-1, 1]"
        low, high = self.action_space.low, self.action_space.high
        act = low + (high - low) * (act + 1.0) / 2.0  # type: ignore
    return act
|
|
|
|
|
2022-03-12 22:26:00 +08:00
|
|
|
def map_action_inverse(
    self, act: Union[Batch, List, np.ndarray]
) -> Union[Batch, List, np.ndarray]:
    """Inverse operation to :meth:`~tianshou.policy.BasePolicy.map_action`.

    This function is called in :meth:`~tianshou.data.Collector.collect` for
    random initial steps. It scales [action_space.low, action_space.high] to
    the value ranges of policy.forward.

    :param act: a data batch, list or numpy.ndarray which is the action taken
        by gym.spaces.Box.sample().

    :return: action remapped.
    """
    # only Box action spaces are remapped
    if not isinstance(self.action_space, gym.spaces.Box):
        return act
    act = to_numpy(act)
    if not isinstance(act, np.ndarray):
        return act
    if self.action_scaling:
        low, high = self.action_space.low, self.action_space.high
        scale = high - low
        # avoid division by (near-)zero for degenerate dimensions
        eps = np.finfo(np.float32).eps.item()
        scale[scale < eps] += eps
        act = (act - low) * 2.0 / scale - 1.0
    if self.action_bound_method == "tanh":
        # atanh, the inverse of the tanh bounding step
        act = (np.log(1.0 + act) - np.log(1.0 - act)) / 2.0  # type: ignore
    return act
|
|
|
|
|
2020-09-12 15:39:01 +08:00
|
|
|
def process_fn(
    self, batch: Batch, buffer: ReplayBuffer, indices: np.ndarray
) -> Batch:
    """Pre-process the data from the provided replay buffer.

    Used in :meth:`update`. Check out :ref:`process_fn` for more information.
    The base implementation returns the batch untouched.
    """
    return batch
|
|
|
|
|
2020-03-15 17:41:00 +08:00
|
|
|
@abstractmethod
def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, Any]:
    """Update policy with a given batch of data.

    :return: A dict, including the data needed to be logged (e.g., loss).

    .. note::

        In order to distinguish the collecting state, updating state and
        testing state, you can check the policy state by ``self.training``
        and ``self.updating``. Please refer to :ref:`policy_state` for more
        detailed explanation.

    .. warning::

        If you use ``torch.distributions.Normal`` and
        ``torch.distributions.Categorical`` to calculate the log_prob,
        please be careful about the shape: Categorical distribution gives
        "[batch_size]" shape while Normal distribution gives "[batch_size,
        1]" shape. The auto-broadcasting of numerical operation with torch
        tensors will amplify this error.
    """
    pass
|
2020-04-14 21:11:06 +08:00
|
|
|
|
2020-09-12 15:39:01 +08:00
|
|
|
def post_process_fn(
    self, batch: Batch, buffer: ReplayBuffer, indices: np.ndarray
) -> None:
    """Post-process the data from the provided replay buffer.

    Typical usage is to update the sampling weight in prioritized
    experience replay. Used in :meth:`update`.
    """
    # only prioritized-style buffers expose update_weight, and only batches
    # produced by such buffers carry a per-transition weight
    supports_prio = hasattr(buffer, "update_weight") and hasattr(batch, "weight")
    if supports_prio:
        buffer.update_weight(indices, batch.weight)
|
2020-09-02 13:03:32 +08:00
|
|
|
|
2021-09-03 05:05:04 +08:00
|
|
|
def update(self, sample_size: int, buffer: Optional[ReplayBuffer],
           **kwargs: Any) -> Dict[str, Any]:
    """Update the policy network and replay buffer.

    It includes 3 function steps: process_fn, learn, and post_process_fn. In
    addition, this function will change the value of ``self.updating``: it will be
    False before this function and will be True when executing :meth:`update`.
    Please refer to :ref:`policy_state` for more detailed explanation.

    :param int sample_size: 0 means it will extract all the data from the buffer,
        otherwise it will sample a batch with given sample_size.
    :param ReplayBuffer buffer: the corresponding replay buffer. If None, an
        empty dict is returned and nothing happens.

    :return: A dict, including the data needed to be logged (e.g., loss) from
        ``policy.learn()``.
    """
    if buffer is None:
        return {}
    batch, indices = buffer.sample(sample_size)
    # mark the updating state so subclasses can distinguish it from
    # collecting/testing via self.updating (see the class docs)
    self.updating = True
    batch = self.process_fn(batch, buffer, indices)
    result = self.learn(batch, **kwargs)
    self.post_process_fn(batch, buffer, indices)
    # step the LR scheduler once per policy update, if one was configured
    if self.lr_scheduler is not None:
        self.lr_scheduler.step()
    self.updating = False
    return result
|
|
|
|
|
2021-02-19 10:33:49 +08:00
|
|
|
@staticmethod
|
2021-08-20 09:58:44 -04:00
|
|
|
def value_mask(buffer: ReplayBuffer, indices: np.ndarray) -> np.ndarray:
|
|
|
|
"""Value mask determines whether the obs_next of buffer[indices] is valid.
|
2021-02-19 10:33:49 +08:00
|
|
|
|
|
|
|
For instance, usually "obs_next" after "done" flag is considered to be invalid,
|
|
|
|
and its q/advantage value can provide meaningless (even misleading)
|
|
|
|
information, and should be set to 0 by hand. But if "done" flag is generated
|
|
|
|
because timelimit of game length (info["TimeLimit.truncated"] is set to True in
|
|
|
|
gym's settings), "obs_next" will instead be valid. Value mask is typically used
|
|
|
|
for assisting in calculating the correct q/advantage value.
|
|
|
|
|
|
|
|
:param ReplayBuffer buffer: the corresponding replay buffer.
|
2021-08-20 09:58:44 -04:00
|
|
|
:param numpy.ndarray indices: indices of replay buffer whose "obs_next" will be
|
2021-02-19 10:33:49 +08:00
|
|
|
judged.
|
|
|
|
|
2021-08-20 09:58:44 -04:00
|
|
|
:return: A bool type numpy.ndarray in the same shape with indices. "True" means
|
|
|
|
"obs_next" of that buffer[indices] is valid.
|
2021-02-19 10:33:49 +08:00
|
|
|
"""
|
2021-08-20 09:58:44 -04:00
|
|
|
mask = ~buffer.done[indices]
|
2021-03-30 16:06:03 +08:00
|
|
|
# info["TimeLimit.truncated"] will be True if "done" flag is generated by
|
|
|
|
# timelimit of environments. Checkout gym.wrappers.TimeLimit.
|
2021-02-26 13:23:18 +08:00
|
|
|
if hasattr(buffer, 'info') and 'TimeLimit.truncated' in buffer.info:
|
2021-08-20 09:58:44 -04:00
|
|
|
mask = mask | buffer.info['TimeLimit.truncated'][indices]
|
2021-02-26 13:23:18 +08:00
|
|
|
return mask
|
2021-02-19 10:33:49 +08:00
|
|
|
|
2020-04-19 14:30:42 +08:00
|
|
|
@staticmethod
def compute_episodic_return(
    batch: Batch,
    buffer: ReplayBuffer,
    indices: np.ndarray,
    v_s_: Optional[Union[np.ndarray, torch.Tensor]] = None,
    v_s: Optional[Union[np.ndarray, torch.Tensor]] = None,
    gamma: float = 0.99,
    gae_lambda: float = 0.95,
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute returns over given batch.

    Use Implementation of Generalized Advantage Estimator (arXiv:1506.02438)
    to calculate q/advantage value of given batch.

    :param Batch batch: a data batch which contains several episodes of data in
        sequential order. Mind that the end of each finished episode of batch
        should be marked by done flag, unfinished (or collecting) episodes will be
        recognized by buffer.unfinished_index().
    :param numpy.ndarray indices: tell batch's location in buffer, batch is equal
        to buffer[indices].
    :param np.ndarray v_s_: the value function of all next states :math:`V(s')`.
    :param np.ndarray v_s: the value function of all current states :math:`V(s)`;
        if None, it is derived by shifting v_s_ by one step (np.roll).
    :param float gamma: the discount factor, should be in [0, 1]. Default to 0.99.
    :param float gae_lambda: the parameter for Generalized Advantage Estimation,
        should be in [0, 1]. Default to 0.95.

    :return: two numpy arrays (returns, advantage) with each shape (bsz, ).
    """
    rew = batch.rew
    if v_s_ is None:
        # without V(s') the estimator degenerates to plain episodic returns,
        # which is only correct for gae_lambda == 1
        assert np.isclose(gae_lambda, 1.0)
        v_s_ = np.zeros_like(rew)
    else:
        v_s_ = to_numpy(v_s_.flatten())
    # zero out V(s') where obs_next is invalid (true episode ends)
    v_s_ = v_s_ * BasePolicy.value_mask(buffer, indices)
    # if V(s) is not given, reuse V(s') shifted by one step
    v_s = np.roll(v_s_, 1) if v_s is None else to_numpy(v_s.flatten())

    end_flag = batch.done.copy()
    # treat still-running (unfinished) episodes as ended so the backward
    # recursion in _gae_return does not bootstrap across them
    end_flag[np.isin(indices, buffer.unfinished_index())] = True
    advantage = _gae_return(v_s, v_s_, rew, end_flag, gamma, gae_lambda)
    returns = advantage + v_s
    # normalization varies from each policy, so we don't do it here
    return returns, advantage
|
2020-06-02 22:29:50 +08:00
|
|
|
|
|
|
|
@staticmethod
def compute_nstep_return(
    batch: Batch,
    buffer: ReplayBuffer,
    indice: np.ndarray,
    target_q_fn: Callable[[ReplayBuffer, np.ndarray], torch.Tensor],
    gamma: float = 0.99,
    n_step: int = 1,
    rew_norm: bool = False,
) -> Batch:
    r"""Compute n-step return for Q-learning targets.

    .. math::
        G_t = \sum_{i = t}^{t + n - 1} \gamma^{i - t}(1 - d_i)r_i +
        \gamma^n (1 - d_{t + n}) Q_{\mathrm{target}}(s_{t + n})

    where :math:`\gamma` is the discount factor, :math:`\gamma \in [0, 1]`,
    :math:`d_t` is the done flag of step :math:`t`.

    :param Batch batch: a data batch, which is equal to buffer[indice].
    :param ReplayBuffer buffer: the data buffer.
    :param function target_q_fn: a function which compute target Q value
        of "obs_next" given data buffer and wanted indices.
    :param float gamma: the discount factor, should be in [0, 1]. Default to 0.99.
    :param int n_step: the number of estimation step, should be an int greater
        than 0. Default to 1.
    :param bool rew_norm: normalize the reward to Normal(0, 1), Default to False.

    :return: a Batch. The result will be stored in batch.returns as a
        torch.Tensor with the same shape as target_q_fn's return tensor.
    """
    assert not rew_norm, \
        "Reward normalization in computing n-step returns is unsupported now."
    rew = buffer.rew
    bsz = len(indice)
    # follow each starting transition forward n_step - 1 times through the
    # buffer; indices ends up with shape (n_step, bsz)
    indices = [indice]
    for _ in range(n_step - 1):
        indices.append(buffer.next(indices[-1]))
    indices = np.stack(indices)
    # terminal indicates buffer indexes nstep after 'indice',
    # and are truncated at the end of each episode
    terminal = indices[-1]
    # no gradient flows through the bootstrap target
    with torch.no_grad():
        target_q_torch = target_q_fn(buffer, terminal)  # (bsz, ?)
    target_q = to_numpy(target_q_torch.reshape(bsz, -1))
    # zero out bootstrap values whose obs_next is invalid (true episode end)
    target_q = target_q * BasePolicy.value_mask(buffer, terminal).reshape(-1, 1)
    end_flag = buffer.done.copy()
    end_flag[buffer.unfinished_index()] = True
    target_q = _nstep_return(rew, end_flag, target_q, indices, gamma, n_step)

    batch.returns = to_torch_as(target_q, target_q_torch)
    if hasattr(batch, "weight"):  # prio buffer update
        batch.weight = to_torch_as(batch.weight, target_q_torch)
    return batch
|
2020-08-15 16:10:42 +08:00
|
|
|
|
2020-09-12 15:39:01 +08:00
|
|
|
def _compile(self) -> None:
    """Warm up the numba-jitted return estimators with tiny dummy inputs.

    The first call to an ``@njit`` function triggers JIT compilation; doing
    it here keeps that latency out of the first real update step.
    """
    arr_f64 = np.array([0, 1], dtype=np.float64)
    arr_f32 = np.array([0, 1], dtype=np.float32)
    flags = np.array([False, True], dtype=np.bool_)
    idx2d = np.array([[0, 1]], dtype=np.int64)
    _gae_return(arr_f64, arr_f64, arr_f64, flags, 0.1, 0.1)
    _gae_return(arr_f32, arr_f32, arr_f64, flags, 0.1, 0.1)
    _nstep_return(arr_f64, flags, arr_f32.reshape(-1, 1), idx2d, 0.1, 1)
|
2020-09-12 15:39:01 +08:00
|
|
|
|
2020-08-15 16:10:42 +08:00
|
|
|
|
2020-09-02 13:03:32 +08:00
|
|
|
@njit
def _gae_return(
    v_s: np.ndarray,
    v_s_: np.ndarray,
    rew: np.ndarray,
    end_flag: np.ndarray,
    gamma: float,
    gae_lambda: float,
) -> np.ndarray:
    """Numba kernel computing GAE advantages with a backward scan."""
    out = np.zeros(rew.shape)
    # TD residual: r_t + gamma * V(s_{t+1}) - V(s_t)
    delta = rew + v_s_ * gamma - v_s
    # per-step discount; an episode end (end_flag) resets the recursion
    discount = (1.0 - end_flag) * (gamma * gae_lambda)
    running = 0.0
    for i in range(len(rew) - 1, -1, -1):
        running = delta[i] + discount[i] * running
        out[i] = running
    return out
|
|
|
|
|
|
|
|
|
|
|
|
@njit
def _nstep_return(
    rew: np.ndarray,
    end_flag: np.ndarray,
    target_q: np.ndarray,
    indices: np.ndarray,
    gamma: float,
    n_step: int,
) -> np.ndarray:
    """Numba kernel for n-step returns, used by compute_nstep_return.

    ``indices`` has shape (n_step, bsz): row ``n`` holds the buffer index of
    each transition ``n`` steps after the starting one.
    """
    # gamma_buffer[i] == gamma ** i, precomputed once
    gamma_buffer = np.ones(n_step + 1)
    for i in range(1, n_step + 1):
        gamma_buffer[i] = gamma_buffer[i - 1] * gamma
    target_shape = target_q.shape
    bsz = target_shape[0]
    # change target_q to 2d array
    target_q = target_q.reshape(bsz, -1)
    returns = np.zeros(target_q.shape)
    # gammas[j] is the effective bootstrap horizon of sample j; it shrinks
    # below n_step when the episode ends earlier
    gammas = np.full(indices[0].shape, n_step)
    for n in range(n_step - 1, -1, -1):
        now = indices[n]
        # an episode end at step n truncates the horizon to n + 1 and
        # discards any reward accumulated beyond it
        gammas[end_flag[now] > 0] = n + 1
        returns[end_flag[now] > 0] = 0.0
        returns = rew[now].reshape(bsz, 1) + gamma * returns
    # bootstrap with gamma ** horizon and add the discounted reward sums
    target_q = target_q * gamma_buffer[gammas].reshape(bsz, 1) + returns
    return target_q.reshape(target_shape)
|