Finish the design and running of PPO and actor-critic. The advantage estimation module is not complete yet.

This commit is contained in:
haoshengzou 2018-01-17 14:21:50 +08:00
parent ed25bf7586
commit 9f96cc2461
6 changed files with 52 additions and 64 deletions

View File

@ -4,6 +4,7 @@ from __future__ import absolute_import
import tensorflow as tf
import time
import numpy as np
import gym
# our lib imports here! It's ok to append path in examples
import sys
@ -14,16 +15,13 @@ import tianshou.data.advantage_estimation as advantage_estimation
import tianshou.core.policy.stochastic as policy # TODO: fix imports as zhusuan so that only need to import to policy
import tianshou.core.value_function.state_value as value_function
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
# for tutorial purpose, placeholders are explicitly appended with '_ph' suffix
if __name__ == '__main__':
env = normalize(CartpoleEnv())
env = gym.make('CartPole-v0')
observation_dim = env.observation_space.shape
action_dim = env.action_space.flat_dim
action_dim = env.action_space.n
clip_param = 0.2
num_batches = 10
@ -41,17 +39,16 @@ if __name__ == '__main__':
net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
action_mean = tf.layers.dense(net, action_dim, activation=None)
action_logstd = tf.get_variable('action_logstd', shape=(action_dim, ))
action_logits = tf.layers.dense(net, action_dim, activation=None)
value = tf.layers.dense(net, 1, activation=None)
return action_mean, action_logstd, value
return action_logits, value
# TODO: overriding seems not able to handle shared layers, unless a new class `SharedPolicyValue`
# maybe the most desired thing is to freely build policy and value function from any tensor?
# but for now, only the outputs of the network matters
### 2. build policy, critic, loss, optimizer
actor = policy.Normal(my_network, observation_placeholder=observation_ph, weight_update=1)
actor = policy.OnehotCategorical(my_network, observation_placeholder=observation_ph, weight_update=1)
critic = value_function.StateValue(my_network, observation_placeholder=observation_ph)
actor_loss = losses.REINFORCE(actor)
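
For reference, a minimal standalone sketch of the shared actor-critic network after this change, using only TensorFlow 1.x and gym. Variable names follow the diff above; this is an illustration, not the committed example file.

```python
import gym
import tensorflow as tf

env = gym.make('CartPole-v0')
observation_dim = env.observation_space.shape   # (4,) for CartPole-v0
action_dim = env.action_space.n                 # 2 discrete actions

observation_ph = tf.placeholder(tf.float32, shape=(None,) + observation_dim)

def my_network():
    # two shared hidden layers feed both heads
    net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
    net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
    # categorical logits replace the Gaussian mean/logstd for the discrete action space
    action_logits = tf.layers.dense(net, action_dim, activation=None)
    # scalar state-value head on the same trunk
    value = tf.layers.dense(net, 1, activation=None)
    return action_logits, value

action_logits, value = my_network()
```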

View File

@ -4,6 +4,7 @@ from __future__ import absolute_import
import tensorflow as tf
import time
import numpy as np
import gym
# our lib imports here! It's ok to append path in examples
import sys
@ -14,16 +15,13 @@ import tianshou.data.advantage_estimation as advantage_estimation
import tianshou.core.policy.stochastic as policy # TODO: fix imports as zhusuan so that only need to import to policy
import tianshou.core.value_function.state_value as value_function
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
# for tutorial purpose, placeholders are explicitly appended with '_ph' suffix
if __name__ == '__main__':
env = normalize(CartpoleEnv())
env = gym.make('CartPole-v0')
observation_dim = env.observation_space.shape
action_dim = env.action_space.flat_dim
action_dim = env.action_space.n
clip_param = 0.2
num_batches = 10
@ -40,10 +38,9 @@ if __name__ == '__main__':
net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
action_mean = tf.layers.dense(net, action_dim, activation=None)
action_logstd = tf.get_variable('action_logstd', shape=(action_dim, ))
action_logits = tf.layers.dense(net, action_dim, activation=None)
return action_mean, action_logstd, None
return action_logits, None
def my_critic():
net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
@ -53,11 +50,11 @@ if __name__ == '__main__':
return None, value
### 2. build policy, critic, loss, optimizer
actor = policy.Normal(my_actor, observation_placeholder=observation_ph, weight_update=1)
print('actor and critic will share the first two layers in this case, and the third layer will cause error')
actor = policy.OnehotCategorical(my_actor, observation_placeholder=observation_ph, weight_update=1)
critic = value_function.StateValue(my_critic, observation_placeholder=observation_ph)
print('actor and critic will share variables in this case')
sys.exit()
actor_loss = losses.vanilla_policy_gradient(actor)
critic_loss = losses.state_value_mse(critic)
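
This example builds the actor and critic from two separate functions, and the new print notes that the two networks will share variables in this case, after which the script exits early. Below is a hedged sketch of one way to keep the two sets of variables disjoint by wrapping each network in its own variable scope; the scope names and this workaround are illustrative, not part of the commit.

```python
import gym
import tensorflow as tf

env = gym.make('CartPole-v0')
observation_dim = env.observation_space.shape
action_dim = env.action_space.n

observation_ph = tf.placeholder(tf.float32, shape=(None,) + observation_dim)

def my_actor():
    # actor network in its own scope so its variables stay separate
    with tf.variable_scope('actor_net'):
        net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
        net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
        action_logits = tf.layers.dense(net, action_dim, activation=None)
    return action_logits, None   # no value head

def my_critic():
    # critic network in a second scope with its own variables
    with tf.variable_scope('critic_net'):
        net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
        net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
        value = tf.layers.dense(net, 1, activation=None)
    return None, value           # no policy head
```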

View File

@ -4,6 +4,7 @@ from __future__ import absolute_import
import tensorflow as tf
import time
import numpy as np
import gym
# our lib imports here! It's ok to append path in examples
import sys
@ -14,16 +15,13 @@ import tianshou.data.advantage_estimation as advantage_estimation
import tianshou.core.policy.stochastic as policy # TODO: fix imports as zhusuan so that only need to import to policy
import tianshou.core.value_function.state_value as value_function
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
# for tutorial purpose, placeholders are explicitly appended with '_ph' suffix
if __name__ == '__main__':
env = normalize(CartpoleEnv())
env = gym.make('CartPole-v0')
observation_dim = env.observation_space.shape
action_dim = env.action_space.flat_dim
action_dim = env.action_space.n
clip_param = 0.2
num_batches = 10
@ -40,17 +38,16 @@ if __name__ == '__main__':
net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
action_mean = tf.layers.dense(net, action_dim, activation=None)
action_logstd = tf.get_variable('action_logstd', shape=(action_dim, ))
action_logits = tf.layers.dense(net, action_dim, activation=None)
net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
value = tf.layers.dense(net, 1, activation=None)
return action_mean, action_logstd, value
return action_logits, value
### 2. build policy, critic, loss, optimizer
actor = policy.Normal(my_network, observation_placeholder=observation_ph, weight_update=1)
actor = policy.OnehotCategorical(my_network, observation_placeholder=observation_ph, weight_update=1)
critic = value_function.StateValue(my_network, observation_placeholder=observation_ph)
actor_loss = losses.REINFORCE(actor)
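
Both actor-critic examples end with losses.REINFORCE(actor), i.e. a policy-gradient loss of the form -log pi(a|s) * advantage. Below is a hedged sketch of that computation from categorical logits in plain TensorFlow; the placeholder names are illustrative, not the library's API.

```python
import tensorflow as tf

action_dim = 2                                             # CartPole-v0
action_logits = tf.placeholder(tf.float32, shape=(None, action_dim))
action_ph = tf.placeholder(tf.int32, shape=(None,))        # actions actually taken
advantage_ph = tf.placeholder(tf.float32, shape=(None,))   # estimated advantages

# sparse_softmax_cross_entropy_with_logits returns -log pi(a|s), so negate it
log_prob = -tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=action_ph, logits=action_logits)
actor_loss = -tf.reduce_mean(log_prob * advantage_ph)      # REINFORCE-style loss
```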

View File

@ -2,8 +2,9 @@
from __future__ import absolute_import
import tensorflow as tf
import time
import gym
import numpy as np
import time
# our lib imports here! It's ok to append path in examples
import sys
@ -13,22 +14,17 @@ from tianshou.data.batch import Batch
import tianshou.data.advantage_estimation as advantage_estimation
import tianshou.core.policy.stochastic as policy # TODO: fix imports as zhusuan so that only need to import to policy
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
# for tutorial purpose, placeholders are explicitly appended with '_ph' suffix
if __name__ == '__main__':
env = normalize(CartpoleEnv())
env = gym.make('CartPole-v0')
observation_dim = env.observation_space.shape
action_dim = env.action_space.flat_dim
action_dim = env.action_space.n
clip_param = 0.2
num_batches = 10
batch_size = 128
batch_size = 512
seed = 10
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
@ -39,11 +35,9 @@ if __name__ == '__main__':
net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
action_mean = tf.layers.dense(net, action_dim, activation=None)
action_logstd = tf.get_variable('action_logstd', shape=(action_dim, ))
# value = tf.layers.dense(net, 1, activation=None)
action_logits = tf.layers.dense(net, action_dim, activation=None)
return action_mean, action_logstd, None # None value head
return action_logits, None # None value head
# TODO: current implementation of passing function or overriding function has to return a value head
# to allow network sharing between policy and value networks. This makes 'policy' and 'value_function'
@ -52,7 +46,7 @@ if __name__ == '__main__':
# not based on passing function or overriding function.
### 2. build policy, loss, optimizer
pi = policy.Normal(my_policy, observation_placeholder=observation_ph, weight_update=0)
pi = policy.OnehotCategorical(my_policy, observation_placeholder=observation_ph, weight_update=0)
ppo_loss_clip = losses.ppo_clip(pi, clip_param)
@ -75,7 +69,7 @@ if __name__ == '__main__':
start_time = time.time()
for i in range(100):
# collect data
training_data.collect(num_episodes=20)
training_data.collect(num_episodes=50)
# print current return
print('Epoch {}:'.format(i))
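
The core of this file is losses.ppo_clip(pi, clip_param), the clipped surrogate objective from the PPO paper: maximize E[min(r_t A_t, clip(r_t, 1-eps, 1+eps) A_t)], where r_t is the ratio of the current policy's action probability to the old (sampling) policy's. Below is a hedged sketch of that objective in plain TensorFlow; the placeholders are illustrative and the library's actual graph may differ.

```python
import tensorflow as tf

clip_param = 0.2
log_prob_new = tf.placeholder(tf.float32, shape=(None,))   # log pi(a|s), current policy
log_prob_old = tf.placeholder(tf.float32, shape=(None,))   # log pi_old(a|s), sampling policy
advantage_ph = tf.placeholder(tf.float32, shape=(None,))

ratio = tf.exp(log_prob_new - log_prob_old)                 # importance ratio r_t
surrogate1 = ratio * advantage_ph
surrogate2 = tf.clip_by_value(ratio, 1. - clip_param, 1. + clip_param) * advantage_ph
# PPO maximizes the clipped surrogate, so the minimized loss is its negation
ppo_loss_clip = -tf.reduce_mean(tf.minimum(surrogate1, surrogate2))
```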

View File

@ -4,6 +4,7 @@ from __future__ import absolute_import
import tensorflow as tf
import time
import numpy as np
import gym
# our lib imports here! It's ok to append path in examples
import sys
@ -13,9 +14,6 @@ from tianshou.data.batch import Batch
import tianshou.data.advantage_estimation as advantage_estimation
import tianshou.core.policy.stochastic as policy # TODO: fix imports as zhusuan so that only need to import to policy
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
# for tutorial purpose, placeholders are explicitly appended with '_ph' suffix
# this example with batch_norm and dropout almost surely cannot improve. it just shows how to use those
@ -36,16 +34,15 @@ class MyPolicy(object):
net = tf.layers.dense(net, 32, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=1 - self.keep_prob_ph)
action_mean = tf.layers.dense(net, action_dim, activation=None)
action_logstd = tf.get_variable('action_logstd', shape=(self.action_dim,), dtype=tf.float32)
action_logits = tf.layers.dense(net, action_dim, activation=None)
return action_mean, action_logstd, None
return action_logits, None
if __name__ == '__main__':
env = normalize(CartpoleEnv())
env = gym.make('CartPole-v0')
observation_dim = env.observation_space.shape
action_dim = env.action_space.flat_dim
action_dim = env.action_space.n
# clip_param = 0.2
num_batches = 10
@ -63,7 +60,7 @@ if __name__ == '__main__':
my_policy = MyPolicy(observation_ph, is_training_ph, keep_prob_ph, action_dim)
### 2. build policy, loss, optimizer
pi = policy.Normal(my_policy, observation_placeholder=observation_ph, weight_update=0)
pi = policy.OnehotCategorical(my_policy, observation_placeholder=observation_ph, weight_update=0)
clip_param = tf.placeholder(tf.float32, shape=(), name='ppo_loss_clip_param')
ppo_loss_clip = losses.ppo_clip(pi, clip_param)
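
This example passes a callable object instead of a plain function so that the extra placeholders (is_training_ph for batch_norm, keep_prob_ph for dropout) can be stored on it. Below is a hedged standalone sketch of such a class, assuming the object is invoked like the plain network functions; layer sizes and the dropout call follow the diff, and the batch_norm part is omitted.

```python
import tensorflow as tf

class MyPolicy(object):
    def __init__(self, observation_ph, is_training_ph, keep_prob_ph, action_dim):
        self.observation_ph = observation_ph
        self.is_training_ph = is_training_ph   # would gate batch_norm layers (omitted here)
        self.keep_prob_ph = keep_prob_ph       # dropout keep probability
        self.action_dim = action_dim

    def __call__(self):
        net = tf.layers.dense(self.observation_ph, 32, activation=tf.nn.relu)
        net = tf.layers.dropout(net, rate=1 - self.keep_prob_ph)
        net = tf.layers.dense(net, 32, activation=tf.nn.relu)
        net = tf.layers.dropout(net, rate=1 - self.keep_prob_ph)
        action_logits = tf.layers.dense(net, self.action_dim, activation=None)
        return action_logits, None             # no value head

observation_ph = tf.placeholder(tf.float32, shape=(None, 4))   # CartPole-v0 observations
is_training_ph = tf.placeholder(tf.bool, shape=())
keep_prob_ph = tf.placeholder(tf.float32, shape=())
my_policy = MyPolicy(observation_ph, is_training_ph, keep_prob_ph, action_dim=2)
```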

View File

@ -2,9 +2,8 @@
from __future__ import absolute_import
import tensorflow as tf
import gym
import numpy as np
import time
import numpy as np
# our lib imports here! It's ok to append path in examples
import sys
@ -14,17 +13,22 @@ from tianshou.data.batch import Batch
import tianshou.data.advantage_estimation as advantage_estimation
import tianshou.core.policy.stochastic as policy # TODO: fix imports as zhusuan so that only need to import to policy
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
# for tutorial purpose, placeholders are explicitly appended with '_ph' suffix
if __name__ == '__main__':
env = gym.make('CartPole-v0')
env = normalize(CartpoleEnv())
observation_dim = env.observation_space.shape
action_dim = env.action_space.n
action_dim = env.action_space.flat_dim
clip_param = 0.2
num_batches = 10
batch_size = 512
batch_size = 128
seed = 0
seed = 10
np.random.seed(seed)
tf.set_random_seed(seed)
@ -35,9 +39,11 @@ if __name__ == '__main__':
net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
action_logits = tf.layers.dense(net, action_dim, activation=None)
action_mean = tf.layers.dense(net, action_dim, activation=None)
action_logstd = tf.get_variable('action_logstd', shape=(action_dim, ))
# value = tf.layers.dense(net, 1, activation=None)
return action_logits, None # None value head
return action_mean, action_logstd, None # None value head
# TODO: current implementation of passing function or overriding function has to return a value head
# to allow network sharing between policy and value networks. This makes 'policy' and 'value_function'
@ -46,7 +52,7 @@ if __name__ == '__main__':
# not based on passing function or overriding function.
### 2. build policy, loss, optimizer
pi = policy.OnehotCategorical(my_policy, observation_placeholder=observation_ph, weight_update=0)
pi = policy.Normal(my_policy, observation_placeholder=observation_ph, weight_update=0)
ppo_loss_clip = losses.ppo_clip(pi, clip_param)
@ -69,7 +75,7 @@ if __name__ == '__main__':
start_time = time.time()
for i in range(100):
# collect data
training_data.collect(num_episodes=50)
training_data.collect(num_episodes=20)
# print current return
print('Epoch {}:'.format(i))
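
This last file moves in the opposite direction, keeping a continuous-action variant on rllab's CartpoleEnv with a Gaussian (policy.Normal) head: a state-dependent mean plus a learned, state-independent log standard deviation. Below is a hedged sketch of that head and of sampling from it in plain TensorFlow; the rllab setup is omitted and the sampling line is illustrative.

```python
import tensorflow as tf

observation_dim = (4,)      # cart-pole-like observation
action_dim = 1              # one continuous action dimension

observation_ph = tf.placeholder(tf.float32, shape=(None,) + observation_dim)

net = tf.layers.dense(observation_ph, 32, activation=tf.nn.tanh)
net = tf.layers.dense(net, 32, activation=tf.nn.tanh)
action_mean = tf.layers.dense(net, action_dim, activation=None)
action_logstd = tf.get_variable('action_logstd', shape=(action_dim,))

# sample a ~ N(mean, exp(logstd)^2), reparameterized as mean + std * noise
action = action_mean + tf.exp(action_logstd) * tf.random_normal(tf.shape(action_mean))
```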