Tianshou/test/throughput/test_buffer_profile.py
n+e 94bfb32cc1
optimize training procedure and improve code coverage (#189)
1. add policy.eval() in all test scripts' "watch performance"
2. remove dict return support for collector preprocess_fn
3. add `__contains__` and `pop` in batch: `key in batch`, `batch.pop(key, deft)`
4. enforce an exact episode count when n_episode is given as a per-env list, and save fake data into cache_buffer when self.buffer is None (#184)
5. fix tensorboard logging: h-axis stands for env step instead of gradient step; add test results into tensorboard
6. add test_returns (both GAE and nstep)
7. change the type-checking order in batch.py and converter.py in order to meet the most often case first
8. fix shape inconsistency for torch.Tensor in replay buffer
9. remove `**kwargs` in ReplayBuffer
10. remove default value in batch.split() and add merge_last argument (#185)
11. improve nstep efficiency
12. add max_batchsize in onpolicy algorithms
13. potential bugfix for subproc.wait
14. fix RecurrentActorProb
15. improve the code-coverage (from 90% to 95%) and remove the dead code
16. fix some incorrect type annotation

The above improvement also increases the training FPS: on my computer, the previous version is only ~1800 FPS and after that, it can reach ~2050 (faster than v0.2.4.post1).
2020-08-27 12:15:18 +08:00

90 lines
2.2 KiB
Python

import pytest
import numpy as np
from tianshou.data import (ListReplayBuffer, PrioritizedReplayBuffer,
ReplayBuffer, SegmentTree)
@pytest.fixture(scope="module")
def data():
    """Module-scope fixture: pre-built buffers plus one canned transition.

    Seeds NumPy so every profiling run draws identical random data.
    NOTE: the np.random call order below matches the original exactly,
    so the seeded stream produces the same arrays.
    """
    np.random.seed(0)
    observation = {
        'observable': np.random.rand(100, 100),
        'hidden': np.random.randint(1000, size=200),
    }
    # One full transition, reused by every add() call in the benchmarks.
    transition = {
        'obs': observation,
        'rew': 1.,
        'act': np.random.rand(30),
        'done': False,
        'obs_next': observation,
        'info': {'policy': "dqn", 'base': np.arange(10)},
    }
    return {
        'add_data': transition,
        'buffer': ReplayBuffer(int(1e3), stack_num=100),
        'buffer2': ReplayBuffer(int(1e4), stack_num=100),
        'slice': slice(-3000, -1000, 2),
        'indexes': np.random.choice(int(1e3), size=3, replace=False),
    }
def test_init():
    """Benchmark construction cost of the three buffer flavors.

    Uses ``range`` instead of ``np.arange`` so the loop counter does not
    materialize a 100k-element float64 array, and passes the size as an
    ``int`` for consistency with the other call sites in this file.
    """
    for _ in range(100000):
        _ = ReplayBuffer(int(1e5))
        _ = PrioritizedReplayBuffer(size=int(1e5), alpha=0.5, beta=0.5)
        _ = ListReplayBuffer()
def test_add(data):
    """Benchmark repeated single-transition insertion into a ReplayBuffer."""
    buffer = data['buffer']
    # Hoist the dict lookup out of the hot loop; range avoids allocating
    # a 100k-element float array just to count iterations.
    add_data = data['add_data']
    for _ in range(100000):
        buffer.add(**add_data)
def test_update(data):
    """Benchmark bulk copy of one buffer's contents into a larger buffer."""
    buffer = data['buffer']
    buffer2 = data['buffer2']
    # range, not np.arange: the loop counter needs no NumPy array.
    for _ in range(100):
        buffer2.update(buffer)
def test_getitem_slice(data):
    """Benchmark __getitem__ with a slice object."""
    indices = data['slice']  # PEP 8: snake_case local (was ``Slice``)
    buffer = data['buffer']
    for _ in range(1000):
        _ = buffer[indices]
def test_getitem_indexes(data):
    """Benchmark __getitem__ with an integer index array."""
    indexes = data['indexes']
    buffer = data['buffer']
    for _ in range(100):
        _ = buffer[indexes]
def test_get(data):
    """Benchmark stacked retrieval of each stored key at given indexes."""
    indexes = data['indexes']
    buffer = data['buffer']
    for _ in range(300):
        # Same four get() calls as before, folded into a loop over keys.
        for key in ('obs', 'rew', 'done', 'info'):
            buffer.get(indexes, key)
def test_sample(data):
    """Benchmark random batch sampling from the buffer."""
    buffer = data['buffer']
    for _ in range(10):
        buffer.sample(100)
def test_segtree(data):
    """Benchmark segment-tree prefix-sum index lookup.

    The ``data`` fixture parameter is unused but kept so the signature
    matches the other benchmarks in this module.
    """
    size = 100000
    tree = SegmentTree(size)
    tree[np.arange(size)] = np.random.rand(size)
    # ``_``: the loop index ``i`` was never used; range avoids allocating
    # a 100k-element float array as the loop counter.
    for _ in range(100000):
        scalar = np.random.rand(64) * tree.reduce()
        tree.get_prefix_sum_idx(scalar)
if __name__ == '__main__':
    # Run only the buffer-profile tests, verbosely, with a timing report.
    pytest_args = ["-s", "-k buffer_profile", "--durations=0", "-v"]
    pytest.main(pytest_args)