songshshshsh 2018-02-27 13:15:36 +08:00
commit 25b25ce7d8
7 changed files with 107 additions and 20 deletions

View File

@@ -76,9 +76,12 @@ if __name__ == '__main__':
        pi.sync_weights()  # TODO: automate this for policies with target network
        start_time = time.time()
        for i in range(100):
        # TODO: repeat_num should be defined in some configuration file
        repeat_num = 100
        for i in range(repeat_num):
            # collect data
            data_collector.collect(nums=50)
            # data_collector.collect(nums=50)
            data_collector.collect(num_episodes=50, epsilon_greedy=(repeat_num - i + 0.0) / repeat_num)
            # print current return
            print('Epoch {}:'.format(i))
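
Note: the epsilon_greedy argument added above follows a simple linear annealing schedule, starting at 1.0 (fully random actions) in the first epoch and decaying to 1/repeat_num by the last. A standalone sketch of that schedule for reference (illustrative only, not part of this commit):

# illustrative sketch of the annealing used above, not part of this commit
def epsilon_schedule(i, repeat_num=100):
    """Linear decay: 1.0 at epoch 0, 1/repeat_num at the last epoch."""
    return (repeat_num - i + 0.0) / repeat_num

print(epsilon_schedule(0))   # 1.0  -> act almost entirely at random
print(epsilon_schedule(50))  # 0.5
print(epsilon_schedule(99))  # 0.01 -> almost always greedy w.r.t. the Q-network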

examples/dqn_replay.py (new file, 87 lines)
View File

@@ -0,0 +1,87 @@
#!/usr/bin/env python
from __future__ import absolute_import

import tensorflow as tf
import gym
import numpy as np
import time

# our lib imports here! It's ok to append path in examples
import sys
sys.path.append('..')
from tianshou.core import losses
# from tianshou.data.batch import Batch
import tianshou.data.advantage_estimation as advantage_estimation
import tianshou.core.policy.dqn as policy  # TODO: fix imports as in zhusuan so that only the policy needs to be imported
import tianshou.core.value_function.action_value as value_function
import tianshou.data.replay as replay
import tianshou.data.data_collector as data_collector


# TODO: why does this solve CartPole even without training?
if __name__ == '__main__':
    env = gym.make('CartPole-v0')
    observation_dim = env.observation_space.shape
    action_dim = env.action_space.n

    clip_param = 0.2
    num_batches = 10
    batch_size = 512

    seed = 0
    np.random.seed(seed)
    tf.set_random_seed(seed)

    ### 1. build network with pure tf
    observation_ph = tf.placeholder(tf.float32, shape=(None,) + observation_dim)

    def my_network():
        net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
        net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
        action_values = tf.layers.dense(net, action_dim, activation=None)
        return None, action_values  # no policy head

    ### 2. build policy, loss, optimizer
    dqn = value_function.DQN(my_network, observation_placeholder=observation_ph, weight_update=100)
    pi = policy.DQN(dqn)

    dqn_loss = losses.qlearning(dqn)

    total_loss = dqn_loss
    optimizer = tf.train.AdamOptimizer(1e-4)
    train_op = optimizer.minimize(total_loss, var_list=dqn.trainable_variables)

    ### 3. define data collection
    # note: `replay` and `data_collector` are the modules imported above; calling them directly
    # will fail, so concrete classes/factories from those modules are presumably intended here
    replay_buffer = replay()
    data_collector = data_collector(env, pi, [advantage_estimation.nstep_q_return(1, dqn)], [dqn], replay_buffer)

    ### 4. start training
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        # assign actor to pi_old
        pi.sync_weights()  # TODO: automate this for policies with target network

        start_time = time.time()
        for i in range(100):
            # collect data
            data_collector.collect(num_episodes=50)

            # print current return
            print('Epoch {}:'.format(i))
            data_collector.statistics()

            # update network
            for _ in range(num_batches):
                feed_dict = data_collector.next_batch(batch_size)
                sess.run(train_op, feed_dict=feed_dict)

            print('Elapsed time: {:.1f} min'.format((time.time() - start_time) / 60))
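
Note: the collector in this example is configured with advantage_estimation.nstep_q_return(1, dqn). For reference, a minimal NumPy sketch of the 1-step Q-learning target such a processor typically computes; the function name and arguments below are illustrative, not tianshou's API:

# illustrative sketch, not part of this commit
import numpy as np

def one_step_q_target(rewards, next_q_values, dones, gamma=0.99):
    """1-step target r_t + gamma * max_a Q_target(s_{t+1}, a), no bootstrap at episode ends."""
    next_max = np.max(next_q_values, axis=1)           # max_a Q_target(s', a)
    return rewards + gamma * (1.0 - dones) * next_max  # mask out terminal transitions

# two transitions with two actions each; the second one ends its episode
r = np.array([1.0, 1.0])
q_next = np.array([[0.5, 2.0], [1.5, 0.3]])
d = np.array([0.0, 1.0])
print(one_step_q_target(r, q_next, d))  # -> [2.98 1.  ]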

View File

@@ -18,7 +18,7 @@ class DQN(PolicyBase):
        else:
            self.interaction_count = -1

    def act(self, observation, exploration=None):
    def act(self, observation, my_feed_dict):
        sess = tf.get_default_session()
        if self.weight_update > 1:
            if self.interaction_count % self.weight_update == 0:
@@ -30,7 +30,6 @@ class DQN(PolicyBase):
        if self.weight_update > 0:
            self.interaction_count += 1
        if not exploration:
            return np.squeeze(action)

    @property

View File

@@ -34,7 +34,7 @@ class Batch(object):
        self._is_first_collect = True

    def collect(self, num_timesteps=0, num_episodes=0, my_feed_dict={},
                process_reward=True):  # specify how much data to collect here, or fix it in __init__()
                process_reward=True, epsilon_greedy=0):  # specify how much data to collect here, or fix it in __init__()
        assert sum(
            [num_timesteps > 0, num_episodes > 0]) == 1, "One and only one collection number specification permitted!"
@@ -106,6 +106,10 @@ class Batch(object):
            episode_start_flags.append(True)
            while True:
                # a simple implementation of epsilon-greedy
                if epsilon_greedy > 0 and np.random.random() < epsilon_greedy:
                    ac = np.random.randint(low=0, high=self._env.action_space.n)
                else:
                    ac = self._pi.act(ob, my_feed_dict)
                actions.append(ac)
@@ -114,9 +118,9 @@
                ob, reward, done, _ = self._env.step(ac)
                rewards.append(reward)
                t_count += 1
                if t_count >= 100:  # force episode stop, just to test if memory still grows
                    break
                # t_count += 1
                # if t_count >= 100:  # force episode stop, just to test if memory still grows
                #     break
                if done:  # end of episode, discard s_T
                    # TODO: for num_timesteps collection, we have to store the terminal flag instead of the start flag!

View File

@@ -3,7 +3,7 @@ import tensorflow as tf
from collections import deque
from math import fabs
from tianshou.data.replay_buffer.buffer import ReplayBuffer
from .buffer import ReplayBuffer
class NaiveExperience(ReplayBuffer):

View File

@@ -1,9 +1,3 @@
#!/usr/bin/python
# -*- encoding=utf-8 -*-
# author: Ian
# e-mail: stmayue@gmail.com
# description:
import sys
import math
import random

View File

@@ -1,8 +1,8 @@
import sys
from tianshou.data.replay_buffer.naive import NaiveExperience
from tianshou.data.replay_buffer.proportional import PropotionalExperience
from tianshou.data.replay_buffer.rank_based import RankBasedExperience
from .naive import NaiveExperience
from .proportional import PropotionalExperience
from .rank_based import RankBasedExperience
def get_replay_buffer(name, env, policy, qnet, target_qnet, conf):
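
Note: the hunks above switch these intra-package imports to relative form; get_replay_buffer below them presumably selects among the three imported buffer classes by name. A hypothetical sketch of what such a name-based factory commonly looks like (the body below is illustrative, not the actual tianshou implementation, and it assumes every buffer class shares the same constructor signature):

# hypothetical sketch, not the actual body of get_replay_buffer
from .naive import NaiveExperience
from .proportional import PropotionalExperience
from .rank_based import RankBasedExperience

def get_replay_buffer(name, env, policy, qnet, target_qnet, conf):
    """Map a buffer name to one of the imported ReplayBuffer implementations."""
    buffers = {
        'naive': NaiveExperience,
        'proportional': PropotionalExperience,
        'rank_based': RankBasedExperience,
    }
    if name not in buffers:
        raise ValueError('Unknown replay buffer: {}'.format(name))
    # assumption: each class accepts these constructor arguments
    return buffers[name](env, policy, qnet, target_qnet, conf)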