import numpy as np
from typing import Any, Tuple, Union, Optional

from tianshou.data.batch import Batch, _create_value


class ReplayBuffer:
    """:class:`~tianshou.data.ReplayBuffer` stores data generated from
    interaction between the policy and environment. The current
    implementation of Tianshou typically uses 7 reserved keys in
    :class:`~tianshou.data.Batch`:

    * ``obs`` the observation of step :math:`t` ;
    * ``act`` the action of step :math:`t` ;
    * ``rew`` the reward of step :math:`t` ;
    * ``done`` the done flag of step :math:`t` ;
    * ``obs_next`` the observation of step :math:`t+1` ;
    * ``info`` the info of step :math:`t` (in ``gym.Env``, the ``env.step()`` \
        function returns 4 arguments, and the last one is ``info``);
    * ``policy`` the data computed by policy in step :math:`t`;

    The following code snippet illustrates its usage:
    ::

        >>> import numpy as np
        >>> from tianshou.data import ReplayBuffer
        >>> buf = ReplayBuffer(size=20)
        >>> for i in range(3):
        ...     buf.add(obs=i, act=i, rew=i, done=i, obs_next=i + 1, info={})
        >>> buf.obs
        # since we set size = 20, len(buf.obs) == 20.
        array([0., 1., 2., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
               0., 0., 0., 0.])
        >>> # but there are only three valid items, so len(buf) == 3.
        >>> len(buf)
        3
        >>> buf2 = ReplayBuffer(size=10)
        >>> for i in range(15):
        ...     buf2.add(obs=i, act=i, rew=i, done=i, obs_next=i + 1, info={})
        >>> len(buf2)
        10
        >>> buf2.obs
        # since its size = 10, it only stores the last 10 steps' result.
        array([10., 11., 12., 13., 14.,  5.,  6.,  7.,  8.,  9.])

        >>> # move buf2's result into buf (meanwhile keep it chronologically)
        >>> buf.update(buf2)
        array([ 0.,  1.,  2.,  5.,  6.,  7.,  8.,  9., 10., 11., 12., 13., 14.,
                0.,  0.,  0.,  0.,  0.,  0.,  0.])

        >>> # get a random sample from buffer
        >>> # the batch_data is equal to buf[indice].
        >>> batch_data, indice = buf.sample(batch_size=4)
        >>> batch_data.obs == buf[indice].obs
        array([ True,  True,  True,  True])

    :class:`~tianshou.data.ReplayBuffer` also supports frame-stack sampling
    (typically for RNN usage, see issue#19), ignoring storing the next
    observation (to save memory in Atari tasks), and multi-modal observation
    (see issue#38):
    ::

        >>> buf = ReplayBuffer(size=9, stack_num=4, ignore_obs_next=True)
        >>> for i in range(16):
        ...     done = i % 5 == 0
        ...     buf.add(obs={'id': i}, act=i, rew=i, done=done,
        ...             obs_next={'id': i + 1})
        >>> print(buf)  # you can see obs_next is not saved in buf
        ReplayBuffer(
            act: array([ 9., 10., 11., 12., 13., 14., 15.,  7.,  8.]),
            done: array([0., 1., 0., 0., 0., 0., 1., 0., 0.]),
            info: Batch(),
            obs: Batch(
                     id: array([ 9., 10., 11., 12., 13., 14., 15.,  7.,  8.]),
                 ),
            policy: Batch(),
            rew: array([ 9., 10., 11., 12., 13., 14., 15.,  7.,  8.]),
        )
        >>> index = np.arange(len(buf))
        >>> print(buf.get(index, 'obs').id)
        [[ 7.  7.  8.  9.]
         [ 7.  8.  9. 10.]
         [11. 11. 11. 11.]
         [11. 11. 11. 12.]
         [11. 11. 12. 13.]
         [11. 12. 13. 14.]
         [12. 13. 14. 15.]
         [ 7.  7.  7.  7.]
         [ 7.  7.  7.  8.]]
        >>> # here is another way to get the stacked data
        >>> # (stack only for obs and obs_next)
        >>> abs(buf.get(index, 'obs')['id'] - buf[index].obs.id).sum().sum()
        0.0
        >>> # we can get obs_next through __getitem__, even if it doesn't exist
        >>> print(buf[:].obs_next.id)
        [[ 7.  8.  9. 10.]
         [ 7.  8.  9. 10.]
         [11. 11. 11. 12.]
         [11. 11. 12. 13.]
         [11. 12. 13. 14.]
         [12. 13. 14. 15.]
         [12. 13. 14. 15.]
         [ 7.  7.  7.  8.]
         [ 7.  7.  8.  9.]]

    :param int size: the size of replay buffer.
    :param int stack_num: the frame-stack sampling argument; it should be
        greater than or equal to 1 and defaults to 1 (no stacking).
    :param bool ignore_obs_next: whether to store obs_next, defaults to
        ``False``.
    :param bool sample_avail: whether to sample only available indices when
        using the frame-stack sampling method, defaults to ``False`` (see the
        note below). This feature is not supported in Prioritized Replay
        Buffer currently.
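
    If ``sample_avail=True`` (and ``stack_num > 1``), the buffer additionally
    tracks which indices have a complete ``stack_num``-frame history within a
    single episode, and :meth:`sample` only draws from those indices. A
    minimal, illustrative sketch::

        >>> buf = ReplayBuffer(size=10, stack_num=4, sample_avail=True)
        >>> # ... fill the buffer with buf.add(...) as above ...
        >>> # buf.sample(batch_size=4) now only returns indices whose previous
        >>> # 3 frames are stored and contain no episode boundary (done flag)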
"""
|
2020-06-29 12:18:52 +08:00
|
|
|
|
2020-07-25 13:33:44 +02:00
|
|
|

    def __init__(self, size: int, stack_num: int = 1,
                 ignore_obs_next: bool = False,
                 sample_avail: bool = False, **kwargs) -> None:
        super().__init__()
        self._maxsize = size
        self._stack = None
        self.stack_num = stack_num
        self._avail = sample_avail and stack_num > 1
        self._avail_index = []
        self._save_s_ = not ignore_obs_next
        self._index = 0
        self._size = 0
        self._meta = Batch()
        self.reset()

    def __len__(self) -> int:
        """Return len(self)."""
        return self._size

    def __repr__(self) -> str:
        """Return str(self)."""
        return self.__class__.__name__ + self._meta.__repr__()[5:]

    def __getattr__(self, key: str) -> Union['Batch', Any]:
        """Return self.key."""
        return self._meta.__dict__[key]

    def _add_to_buffer(self, name: str, inst: Any) -> None:
        try:
            value = self._meta.__dict__[name]
        except KeyError:
            # lazily allocate storage for this key on its first appearance
            self._meta.__dict__[name] = _create_value(inst, self._maxsize)
            value = self._meta.__dict__[name]
        if isinstance(inst, np.ndarray) and value.shape[1:] != inst.shape:
            raise ValueError(
                "Cannot add data to a buffer with different shape, key: "
                f"{name}, expect shape: {value.shape[1:]}, "
                f"given shape: {inst.shape}.")
        try:
            value[self._index] = inst
        except KeyError:
            # the incoming dict/Batch carries keys unseen so far: allocate them
            for key in set(inst.keys()).difference(value.__dict__.keys()):
                value.__dict__[key] = _create_value(inst[key], self._maxsize)
            value[self._index] = inst

    @property
    def stack_num(self):
        return self._stack

    @stack_num.setter
    def stack_num(self, num):
        assert num > 0, 'stack_num should be greater than 0'
        self._stack = num

    def update(self, buffer: 'ReplayBuffer') -> None:
        """Move the data from the given buffer to self."""
        if len(buffer) == 0:
            return
        i = begin = buffer._index % len(buffer)
        # temporarily disable stacking so that raw transitions are copied
        stack_num_orig = buffer.stack_num
        buffer.stack_num = 1
        while True:
            self.add(**buffer[i])
            i = (i + 1) % len(buffer)
            if i == begin:
                break
        buffer.stack_num = stack_num_orig

    def add(self,
            obs: Union[dict, Batch, np.ndarray],
            act: Union[np.ndarray, float],
            rew: Union[int, float],
            done: bool,
            obs_next: Optional[Union[dict, Batch, np.ndarray]] = None,
            info: dict = {},
            policy: Optional[Union[dict, Batch]] = {},
            **kwargs) -> None:
        """Add a batch of data into replay buffer."""
        assert isinstance(info, (dict, Batch)), \
            'You should return a dict in the last argument of env.step().'
        self._add_to_buffer('obs', obs)
        self._add_to_buffer('act', act)
        self._add_to_buffer('rew', rew)
        self._add_to_buffer('done', done)
        if self._save_s_:
            if obs_next is None:
                obs_next = Batch()
            self._add_to_buffer('obs_next', obs_next)
        self._add_to_buffer('info', info)
        self._add_to_buffer('policy', policy)

        # maintain available index for frame-stack sampling
        if self._avail:
            # update current frame
            avail = sum(self.done[i] for i in range(
                self._index - self.stack_num + 1, self._index)) == 0
            if self._size < self.stack_num - 1:
                avail = False
            if avail and self._index not in self._avail_index:
                self._avail_index.append(self._index)
            elif not avail and self._index in self._avail_index:
                self._avail_index.remove(self._index)
            # remove the later available frame because of broken storage
            t = (self._index + self.stack_num - 1) % self._maxsize
            if t in self._avail_index:
                self._avail_index.remove(t)

        if self._maxsize > 0:
            self._size = min(self._size + 1, self._maxsize)
            self._index = (self._index + 1) % self._maxsize
        else:
            self._size = self._index = self._index + 1

    def reset(self) -> None:
        """Clear all the data in replay buffer."""
        self._index = 0
        self._size = 0
        self._avail_index = []

    def sample(self, batch_size: int) -> Tuple[Batch, np.ndarray]:
        """Get a random sample from buffer with size equal to batch_size. \
        Return all the data in the buffer if batch_size is ``0``.

        :return: Sample data and its corresponding index inside the buffer.
        """
        if batch_size > 0:
            _all = self._avail_index if self._avail else self._size
            indice = np.random.choice(_all, batch_size)
        else:
            if self._avail:
                indice = np.array(self._avail_index)
            else:
                indice = np.concatenate([
                    np.arange(self._index, self._size),
                    np.arange(0, self._index),
                ])
        assert len(indice) > 0, 'No available indice can be sampled.'
        return self[indice], indice

    def get(self, indice: Union[slice, int, np.integer, np.ndarray], key: str,
            stack_num: Optional[int] = None) -> Union[Batch, np.ndarray]:
        """Return the stacked result, e.g. [s_{t-3}, s_{t-2}, s_{t-1}, s_t],
        where s is self.key and t is indice. The stack_num (here equal to 4)
        is given by the buffer's initialization procedure.
        """
        if stack_num is None:
            stack_num = self.stack_num
        if isinstance(indice, slice):
            indice = np.arange(
                0 if indice.start is None
                else self._size - indice.start if indice.start < 0
                else indice.start,
                self._size if indice.stop is None
                else self._size - indice.stop if indice.stop < 0
                else indice.stop,
                1 if indice.step is None else indice.step)
        else:
            indice = np.array(indice, copy=True)
        # set last frame done to True
        last_index = (self._index - 1 + self._size) % self._size
        last_done, self.done[last_index] = self.done[last_index], True
        if key == 'obs_next' and (not self._save_s_ or self.obs_next is None):
            # obs_next is not stored: fall back to the next step's obs
            indice += 1 - self.done[indice].astype(np.int)
            indice[indice == self._size] = 0
            key = 'obs'
        val = self._meta.__dict__[key]
        try:
            if stack_num > 1:
                # walk backwards through the buffer, repeating frames at
                # episode boundaries, to collect the previous frames
                stack = []
                for _ in range(stack_num):
                    stack = [val[indice]] + stack
                    pre_indice = np.asarray(indice - 1)
                    pre_indice[pre_indice == -1] = self._size - 1
                    indice = np.asarray(
                        pre_indice + self.done[pre_indice].astype(np.int))
                    indice[indice == self._size] = 0
                if isinstance(val, Batch):
                    stack = Batch.stack(stack, axis=indice.ndim)
                else:
                    stack = np.stack(stack, axis=indice.ndim)
            else:
                stack = val[indice]
        except IndexError as e:
            stack = Batch()
            if not isinstance(val, Batch) or len(val.__dict__) > 0:
                raise e
        self.done[last_index] = last_done
        return stack

    def __getitem__(self, index: Union[
            slice, int, np.integer, np.ndarray]) -> Batch:
        """Return a data batch: self[index]. If stack_num is larger than 1,
        return the stacked obs and obs_next with shape [batch, len, ...].
        """
        return Batch(
            obs=self.get(index, 'obs'),
            act=self.act[index],
            rew=self.rew[index],
            done=self.done[index],
            obs_next=self.get(index, 'obs_next'),
            info=self.get(index, 'info'),
            policy=self.get(index, 'policy')
        )


class ListReplayBuffer(ReplayBuffer):
    """The function of :class:`~tianshou.data.ListReplayBuffer` is almost the
    same as :class:`~tianshou.data.ReplayBuffer`. The only difference is that
    :class:`~tianshou.data.ListReplayBuffer` is based on ``list``. Therefore,
    it does not support advanced indexing, which means you cannot sample a
    batch of data out of it. It is typically used for storing data.

    .. seealso::

        Please refer to :class:`~tianshou.data.ReplayBuffer` for more
        detailed explanation.
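
    A minimal, illustrative sketch of the intended usage (each reserved key
    is simply collected into a Python ``list``)::

        >>> from tianshou.data import ListReplayBuffer
        >>> buf = ListReplayBuffer()
        >>> for i in range(3):
        ...     buf.add(obs=i, act=i, rew=i, done=0, obs_next=i + 1, info={})
        >>> len(buf)
        3
        >>> buf.reset()  # drop the collected data
        >>> len(buf)
        0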
"""
|
|
|
|
|
2020-05-12 11:31:47 +08:00
|
|
|

    def __init__(self, **kwargs) -> None:
        super().__init__(size=0, ignore_obs_next=False, **kwargs)

    def sample(self, batch_size: int) -> Tuple[Batch, np.ndarray]:
        raise NotImplementedError("ListReplayBuffer cannot be sampled!")

    def _add_to_buffer(
            self, name: str,
            inst: Union[dict, Batch, np.ndarray, float, int, bool]) -> None:
        if inst is None:
            return
        if self._meta.__dict__.get(name, None) is None:
            self._meta.__dict__[name] = []
        self._meta.__dict__[name].append(inst)

    def reset(self) -> None:
        self._index = self._size = 0
        for k in list(self._meta.__dict__.keys()):
            if isinstance(self._meta.__dict__[k], list):
                self._meta.__dict__[k] = []


class PrioritizedReplayBuffer(ReplayBuffer):
    """Prioritized replay buffer implementation.

    :param float alpha: the prioritization exponent.
    :param float beta: the importance sample soft coefficient.
    :param str mode: defaults to ``weight``.
    :param bool replace: whether to sample with replacement, defaults to
        ``False``.

    .. seealso::

        Please refer to :class:`~tianshou.data.ReplayBuffer` for more
        detailed explanation.
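
    A minimal, illustrative sketch of the intended usage (transitions are
    added with a priority ``weight``, sampled with probability proportional
    to ``abs(weight) ** alpha``, and returned together with an
    importance-sampling weight ``impt_weight``)::

        >>> import numpy as np
        >>> from tianshou.data import PrioritizedReplayBuffer
        >>> buf = PrioritizedReplayBuffer(size=10, alpha=0.6, beta=0.4)
        >>> for i in range(5):
        ...     buf.add(obs=i, act=i, rew=i, done=0, obs_next=i + 1,
        ...             info={}, weight=1.0)
        >>> batch, indice = buf.sample(batch_size=4)
        >>> batch.impt_weight.shape
        (4,)
        >>> # priorities can then be refreshed, e.g. with new td-errors
        >>> buf.update_weight(indice, np.abs(np.random.randn(len(indice))))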
"""
|
2020-03-13 17:49:22 +08:00
|
|
|
|
2020-05-12 11:31:47 +08:00
|
|
|

    def __init__(self, size: int, alpha: float, beta: float,
                 mode: str = 'weight',
                 replace: bool = False, **kwargs) -> None:
        if mode != 'weight':
            raise NotImplementedError
        super().__init__(size, **kwargs)
        self._alpha = alpha
        self._beta = beta
        self._weight_sum = 0.0
        self._amortization_freq = 50
        self._replace = replace
        self._meta.weight = np.zeros(size, dtype=np.float64)

    def add(self,
            obs: Union[dict, np.ndarray],
            act: Union[np.ndarray, float],
            rew: Union[int, float],
            done: bool,
            obs_next: Optional[Union[dict, np.ndarray]] = None,
            info: dict = {},
            policy: Optional[Union[dict, Batch]] = {},
            weight: float = 1.0,
            **kwargs) -> None:
        """Add a batch of data into replay buffer."""
        # we have to sacrifice some convenience for speed
        self._weight_sum += np.abs(weight) ** self._alpha - \
            self._meta.weight[self._index]
        self._add_to_buffer('weight', np.abs(weight) ** self._alpha)
        super().add(obs, act, rew, done, obs_next, info, policy)

    @property
    def replace(self):
        return self._replace

    @replace.setter
    def replace(self, v: bool):
        self._replace = v

    def sample(self, batch_size: int) -> Tuple[Batch, np.ndarray]:
        """Get a random sample from buffer with priority probability. \
        Return all the data in the buffer if batch_size is ``0``.

        :return: Sample data and its corresponding index inside the buffer.
        """
        assert self._size > 0, 'cannot sample a buffer with size == 0!'
        p = None
        if batch_size > 0 and (self._replace or batch_size <= self._size):
            # sampling weight
            p = (self.weight / self.weight.sum())[:self._size]
            indice = np.random.choice(
                self._size, batch_size, p=p,
                replace=self._replace)
            p = p[indice]  # weight of each sample
        elif batch_size == 0:
            p = np.full(shape=self._size, fill_value=1.0 / self._size)
            indice = np.concatenate([
                np.arange(self._index, self._size),
                np.arange(0, self._index),
            ])
        else:
            raise ValueError(
                f"batch_size should be less than or equal to {len(self)}, "
                "or set replace=True.")
        batch = self[indice]
        # importance sampling weight: (N * p_i) ** (-beta)
        batch["impt_weight"] = (self._size * p) ** (-self._beta)
        return batch, indice

    def update_weight(self, indice: Union[slice, np.ndarray],
                      new_weight: np.ndarray) -> None:
        """Update priority weight by indice in this buffer.

        :param np.ndarray indice: the indices whose weight you want to update.
        :param np.ndarray new_weight: the new priority weight.
        """
        if self._replace:
            if isinstance(indice, slice):
                # convert slice to ndarray
                indice = np.arange(indice.stop)[indice]
            # remove the same values in indice
            indice, unique_indice = np.unique(
                indice, return_index=True)
            new_weight = new_weight[unique_indice]
        self.weight[indice] = np.power(np.abs(new_weight), self._alpha)

    def __getitem__(self, index: Union[
            slice, int, np.integer, np.ndarray]) -> Batch:
        return Batch(
            obs=self.get(index, 'obs'),
            act=self.act[index],
            rew=self.rew[index],
            done=self.done[index],
            obs_next=self.get(index, 'obs_next'),
            info=self.get(index, 'info'),
            weight=self.weight[index],
            policy=self.get(index, 'policy'),
        )
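

# A minimal, self-contained smoke test (an illustrative sketch only, not part
# of the original module): it exercises the public API defined above when the
# file is run directly.
if __name__ == '__main__':
    buf = ReplayBuffer(size=8, stack_num=2)
    for step in range(12):
        buf.add(obs=step, act=step, rew=float(step), done=step % 4 == 3,
                obs_next=step + 1, info={})
    batch, indice = buf.sample(batch_size=4)
    print('sampled (stacked) obs:\n', batch.obs)

    pbuf = PrioritizedReplayBuffer(size=8, alpha=0.6, beta=0.4)
    for step in range(8):
        pbuf.add(obs=step, act=step, rew=float(step), done=False,
                 obs_next=step + 1, info={}, weight=1.0)
    pbatch, pindice = pbuf.sample(batch_size=4)
    print('importance weights:', pbatch.impt_weight)
    # refresh priorities, e.g. with new absolute td-errors
    pbuf.update_weight(pindice, np.abs(pbatch.rew) + 1.0)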