from abc import ABC, abstractmethod

from torch import nn
class BasePolicy(ABC, nn.Module):
    """Base class for all policies in Tianshou.

    Tianshou splits RL algorithms into several policy classes, each of
    which must inherit from :class:`~tianshou.policy.BasePolicy`.

    A policy class typically consists of four parts:

    * :meth:`~tianshou.policy.BasePolicy.__init__`: initialize the policy, \
        including copying the target network and so on;
    * :meth:`~tianshou.policy.BasePolicy.forward`: compute action with given \
        observation;
    * :meth:`~tianshou.policy.BasePolicy.process_fn`: pre-process data from \
        the replay buffer (this function can interact with replay buffer);
    * :meth:`~tianshou.policy.BasePolicy.learn`: update policy with a given \
        batch of data.

    Most policies need a neural network to predict the action and an
    optimizer to optimize the policy. The rules for a self-defined network
    are:

    1. Input: observation ``obs`` (either a ``numpy.ndarray`` or a \
        ``torch.Tensor``), hidden state ``state`` (for RNN usage), and any \
        other ``info`` provided by the environment.
    2. Output: some ``logits`` together with the next hidden state \
        ``state``. The logits may be a tuple rather than a plain \
        ``torch.Tensor``; it is up to the policy how the network output is \
        processed. For instance, in PPO the network of a Gaussian policy \
        may return ``(mu, sigma), state``.

    Since :class:`~tianshou.policy.BasePolicy` inherits from
    ``torch.nn.Module``, it can be handled much like any other
    ``torch.nn.Module``, e.g. to save and load the model:
    ::

        torch.save(policy.state_dict(), 'policy.pth')
        policy.load_state_dict(torch.load('policy.pth'))
    """

    def __init__(self, **kwargs):
        # Extra keyword arguments are accepted (and ignored here) so that
        # subclasses can forward their own constructor kwargs freely.
        super().__init__()

    def process_fn(self, batch, buffer, indice):
        """Pre-process the data from the given replay buffer.

        Check out :ref:`policy_concept` for more information. The default
        implementation returns the batch unchanged.
        """
        return batch

    @abstractmethod
    def forward(self, batch, state=None, **kwargs):
        """Compute action over the given batch data.

        :return: A :class:`~tianshou.data.Batch` which MUST contain the
            following keys:

            * ``act`` a numpy.ndarray or a torch.Tensor, the action over \
                the given batch data.
            * ``state`` a dict, a numpy.ndarray or a torch.Tensor, the \
                internal state of the policy, ``None`` by default.

        Any other keys are user-defined and depend on the algorithm, e.g.
        ::

            # some code
            return Batch(logits=..., act=..., state=None, dist=...)
        """
        pass

    @abstractmethod
    def learn(self, batch, **kwargs):
        """Update policy with a given batch of data.

        :return: A dict containing each loss and its corresponding label.
        """
        pass