132 lines
5.2 KiB
Python
Raw Normal View History

2020-05-12 11:31:47 +08:00
import torch
2020-04-14 21:11:06 +08:00
import numpy as np
2020-03-18 21:45:41 +08:00
from torch import nn
2020-03-12 22:20:33 +08:00
from abc import ABC, abstractmethod
2020-05-12 11:31:47 +08:00
from typing import Dict, List, Union, Optional
from tianshou.data import Batch, ReplayBuffer
2020-03-12 22:20:33 +08:00
2020-03-18 21:45:41 +08:00
class BasePolicy(ABC, nn.Module):
    """Tianshou aims to modularizing RL algorithms. It comes into several
    classes of policies in Tianshou. All of the policy classes must inherit
    :class:`~tianshou.policy.BasePolicy`.

    A policy class typically has four parts:

    * :meth:`~tianshou.policy.BasePolicy.__init__`: initialize the policy, \
        including coping the target network and so on;
    * :meth:`~tianshou.policy.BasePolicy.forward`: compute action with given \
        observation;
    * :meth:`~tianshou.policy.BasePolicy.process_fn`: pre-process data from \
        the replay buffer (this function can interact with replay buffer);
    * :meth:`~tianshou.policy.BasePolicy.learn`: update policy with a given \
        batch of data.

    Most of the policy needs a neural network to predict the action and an
    optimizer to optimize the policy. The rules of self-defined networks are:

    1. Input: observation ``obs`` (may be a ``numpy.ndarray`` or \
        ``torch.Tensor``), hidden state ``state`` (for RNN usage), and other \
        information ``info`` provided by the environment.
    2. Output: some ``logits`` and the next hidden state ``state``. The logits\
        could be a tuple instead of a ``torch.Tensor``. It depends on how the \
        policy process the network output. For example, in PPO, the return of \
        the network might be ``(mu, sigma), state`` for Gaussian policy.

    Since :class:`~tianshou.policy.BasePolicy` inherits ``torch.nn.Module``,
    you can use :class:`~tianshou.policy.BasePolicy` almost the same as
    ``torch.nn.Module``, for instance, loading and saving the model:
    ::

        torch.save(policy.state_dict(), 'policy.pth')
        policy.load_state_dict(torch.load('policy.pth'))
    """

    def __init__(self, **kwargs) -> None:
        super().__init__()

    def process_fn(self, batch: Batch, buffer: ReplayBuffer,
                   indice: np.ndarray) -> Batch:
        """Pre-process the data from the provided replay buffer. Check out
        :ref:`policy_concept` for more information. The default
        implementation is the identity function.
        """
        return batch

    @abstractmethod
    def forward(self, batch: Batch,
                state: Optional[Union[dict, Batch, np.ndarray]] = None,
                **kwargs) -> Batch:
        """Compute action over the given batch data.

        :return: A :class:`~tianshou.data.Batch` which MUST have the following\
        keys:

            * ``act`` an numpy.ndarray or a torch.Tensor, the action over \
                given batch data.
            * ``state`` a dict, an numpy.ndarray or a torch.Tensor, the \
                internal state of the policy, ``None`` as default.

        Other keys are user-defined. It depends on the algorithm. For example,
        ::

            # some code
            return Batch(logits=..., act=..., state=None, dist=...)

        After version >= 0.2.3, the keyword "policy" is reserved and the
        corresponding data will be stored into the replay buffer in numpy. For
        instance,
        ::

            # some code
            return Batch(..., policy=Batch(log_prob=dist.log_prob(act)))
            # and in the sampled data batch, you can directly call
            # batch.policy.log_prob to get your data, although it is stored in
            # np.ndarray.
        """
        pass

    @abstractmethod
    def learn(self, batch: Batch, **kwargs
              ) -> Dict[str, Union[float, List[float]]]:
        """Update policy with a given batch of data.

        :return: A dict which includes loss and its corresponding label.
        """
        pass

    @staticmethod
    def compute_episodic_return(
            batch: Batch,
            v_s_: Optional[Union[np.ndarray, torch.Tensor]] = None,
            gamma: float = 0.99,
            gae_lambda: float = 0.95) -> Batch:
        """Compute returns over given full-length episodes, including the
        implementation of Generalized Advantage Estimation (arXiv:1506.02438).

        :param batch: a data batch which contains several full-episode data
            chronologically.
        :type batch: :class:`~tianshou.data.Batch`
        :param v_s_: the value function of all next states :math:`V(s')`;
            ``None`` (the default) is treated as all zeros.
        :type v_s_: numpy.ndarray
        :param float gamma: the discount factor, should be in [0, 1], defaults
            to 0.99.
        :param float gae_lambda: the parameter for Generalized Advantage
            Estimation, should be in [0, 1], defaults to 0.95.

        :return: the input ``batch`` with an extra key ``returns``, the
            lambda-returns computed in place.
        """
        if v_s_ is None:
            v_s_ = np.zeros_like(batch.rew)
        else:
            if not isinstance(v_s_, np.ndarray):
                # ``np.float`` was deprecated in NumPy 1.20 and removed in
                # 1.24; ``np.float64`` is the dtype it aliased.
                v_s_ = np.array(v_s_, dtype=np.float64)
            v_s_ = v_s_.reshape(batch.rew.shape)
        # Rolling v_s_ forward by one step yields V(s_t) for each index t
        # (position 0 wraps around, but its baseline cancels out below).
        batch.returns = np.roll(v_s_, 1, axis=0)
        m = (1. - batch.done) * gamma
        # TD residual: delta_t = r_t + gamma * V(s_{t+1}) * (1 - done) - V(s_t)
        delta = batch.rew + v_s_ * m - batch.returns
        m *= gae_lambda
        gae = 0.
        # Backward pass accumulating lambda-weighted advantages; done flags
        # reset the accumulation at episode boundaries via m[i] == 0.
        for i in range(len(batch.rew) - 1, -1, -1):
            gae = delta[i] + m[i] * gae
            batch.returns[i] += gae
        return batch