mcts framework
This commit is contained in:
parent d9368c9a78
commit 30427055d1
@ -70,7 +70,7 @@ total_loss = value_loss + policy_loss + reg
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.RMSPropOptimizer(1e-4).minimize(total_loss)
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
saver = tf.train.Saver(max_to_keep=10, var_list=var_list)
@ -131,13 +131,21 @@ def train():

def forward(call_number):
    #checkpoint_path = "/home/yama/rl/tianshou/AlphaGo/checkpoints"
    checkpoint_path = "/home/yama/rl/tianshou/AlphaGo/checkpoints/jialian"
    checkpoint_path = "/home/yama/rl/tianshou/AlphaGo/checkpoints"
    board_file = np.genfromtxt("/home/yama/rl/tianshou/leela-zero/src/mcts_nn_files/board_" + call_number, dtype='str')
    human_board = np.zeros((17, 19, 19))

    #TODO : is it ok to ignore the last channel?
    for i in range(17):
        human_board[i] = np.array(list(board_file[i])).reshape(19, 19)
    #print("============================")
    #print("human board sum : " + str(np.sum(human_board[-1])))
    #print("============================")
    #print(human_board)
    #print("============================")
    #print(human_board)
    feed_board = human_board.transpose(1, 2, 0).reshape(1, 19, 19, 17)
    #print(feed_board[:,:,:,-1])
    #print(feed_board.shape)

    #npz_board = np.load("/home/yama/rl/tianshou/AlphaGo/data/7f83928932f64a79bc1efdea268698ae.npz")
@ -148,7 +156,7 @@ def forward(call_number):
    #print("board shape : ", show_board.shape)
    #print(show_board)

    itflag = True
    itflag = False
    with multi_gpu.create_session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt_file = tf.train.latest_checkpoint(checkpoint_path)
AlphaGo/Network_ori.py (Normal file, 173 lines)
@ -0,0 +1,173 @@
import os
import time
import gc

import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers

import multi_gpu

os.environ["CUDA_VISIBLE_DEVICES"] = "1"


def residual_block(input, is_training):
    normalizer_params = {'is_training': is_training,
                         'updates_collections': tf.GraphKeys.UPDATE_OPS}
    h = layers.conv2d(input, 256, kernel_size=3, stride=1, activation_fn=tf.nn.relu,
                      normalizer_fn=layers.batch_norm, normalizer_params=normalizer_params,
                      weights_regularizer=layers.l2_regularizer(1e-4))
    h = layers.conv2d(h, 256, kernel_size=3, stride=1, activation_fn=tf.identity,
                      normalizer_fn=layers.batch_norm, normalizer_params=normalizer_params,
                      weights_regularizer=layers.l2_regularizer(1e-4))
    h = h + input
    return tf.nn.relu(h)


def policy_heads(input, is_training):
    normalizer_params = {'is_training': is_training,
                         'updates_collections': tf.GraphKeys.UPDATE_OPS}
    h = layers.conv2d(input, 2, kernel_size=1, stride=1, activation_fn=tf.nn.relu,
                      normalizer_fn=layers.batch_norm, normalizer_params=normalizer_params,
                      weights_regularizer=layers.l2_regularizer(1e-4))
    h = layers.flatten(h)
    h = layers.fully_connected(h, 362, activation_fn=tf.identity, weights_regularizer=layers.l2_regularizer(1e-4))
    return h


def value_heads(input, is_training):
    normalizer_params = {'is_training': is_training,
                         'updates_collections': tf.GraphKeys.UPDATE_OPS}
    h = layers.conv2d(input, 2, kernel_size=1, stride=1, activation_fn=tf.nn.relu,
                      normalizer_fn=layers.batch_norm, normalizer_params=normalizer_params,
                      weights_regularizer=layers.l2_regularizer(1e-4))
    h = layers.flatten(h)
    h = layers.fully_connected(h, 256, activation_fn=tf.nn.relu, weights_regularizer=layers.l2_regularizer(1e-4))
    h = layers.fully_connected(h, 1, activation_fn=tf.nn.tanh, weights_regularizer=layers.l2_regularizer(1e-4))
    return h


x = tf.placeholder(tf.float32, shape=[None, 19, 19, 17])
is_training = tf.placeholder(tf.bool, shape=[])
z = tf.placeholder(tf.float32, shape=[None, 1])
pi = tf.placeholder(tf.float32, shape=[None, 362])

h = layers.conv2d(x, 256, kernel_size=3, stride=1, activation_fn=tf.nn.relu, normalizer_fn=layers.batch_norm,
                  normalizer_params={'is_training': is_training, 'updates_collections': tf.GraphKeys.UPDATE_OPS},
                  weights_regularizer=layers.l2_regularizer(1e-4))
for i in range(19):
    h = residual_block(h, is_training)
v = value_heads(h, is_training)
p = policy_heads(h, is_training)
# loss = tf.reduce_mean(tf.square(z-v)) - tf.multiply(pi, tf.log(tf.clip_by_value(tf.nn.softmax(p), 1e-8, tf.reduce_max(tf.nn.softmax(p)))))
value_loss = tf.reduce_mean(tf.square(z - v))
policy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=pi, logits=p))

reg = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
total_loss = value_loss + policy_loss + reg
# train_op = tf.train.MomentumOptimizer(1e-4, momentum=0.9, use_nesterov=True).minimize(total_loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.RMSPropOptimizer(1e-4).minimize(total_loss)
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
saver = tf.train.Saver(max_to_keep=10, var_list=var_list)

def train():
    data_path = "/home/tongzheng/data/"
    data_name = os.listdir("/home/tongzheng/data/")
    epochs = 100
    batch_size = 128

    result_path = "./checkpoints/"
    with multi_gpu.create_session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt_file = tf.train.latest_checkpoint(result_path)
        if ckpt_file is not None:
            print('Restoring model from {}...'.format(ckpt_file))
            saver.restore(sess, ckpt_file)
        for epoch in range(epochs):
            for name in data_name:
                data = np.load(data_path + name)
                boards = data["boards"]
                wins = data["wins"]
                ps = data["ps"]
                print(boards.shape)
                print(wins.shape)
                print(ps.shape)
                # batch_num = 1
                batch_num = boards.shape[0] // batch_size
                index = np.arange(boards.shape[0])
                np.random.shuffle(index)
                value_losses = []
                policy_losses = []
                regs = []
                time_train = -time.time()
                for iter in range(batch_num):
                    lv, lp, r, _ = sess.run([value_loss, policy_loss, reg, train_op],
                                            feed_dict={x: boards[index[iter * batch_size:(iter + 1) * batch_size]],
                                                       z: wins[index[iter * batch_size:(iter + 1) * batch_size]],
                                                       pi: ps[index[iter * batch_size:(iter + 1) * batch_size]],
                                                       is_training: True})
                    value_losses.append(lv)
                    policy_losses.append(lp)
                    regs.append(r)
                    del lv, lp, r
                    if iter % 1 == 0:
                        print(
                            "Epoch: {}, Part {}, Iteration: {}, Time: {}, Value Loss: {}, Policy Loss: {}, Reg: {}".format(
                                epoch, name, iter, time.time() + time_train, np.mean(np.array(value_losses)),
                                np.mean(np.array(policy_losses)), np.mean(np.array(regs))))
                        del value_losses, policy_losses, regs, time_train
                        time_train = -time.time()
                        value_losses = []
                        policy_losses = []
                        regs = []
                    if iter % 20 == 0:
                        save_path = "Epoch{}.Part{}.Iteration{}.ckpt".format(epoch, name, iter)
                        saver.save(sess, result_path + save_path)
                        del save_path
                del data, boards, wins, ps, batch_num, index
                gc.collect()

def forward(board):
    result_path = "./checkpoints"
    itflag = False
    res = None
    data = None
    if board is None:
        # data = np.load("/home/tongzheng/meta-data/80b7bf21bce14862806d48c3cd760a1b.npz")
        data = np.load("./data/7f83928932f64a79bc1efdea268698ae.npz")
        board = data["boards"][50].reshape(-1, 19, 19, 17)
        human_board = board[0].transpose(2, 0, 1)
        print("============================")
        print("human board sum : " + str(np.sum(human_board)))
        print("============================")
        print(board[:,:,:,-1])
        itflag = False
    with multi_gpu.create_session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt_file = tf.train.latest_checkpoint(result_path)
        if ckpt_file is not None:
            print('Restoring model from {}...'.format(ckpt_file))
            saver.restore(sess, ckpt_file)
        else:
            raise ValueError("No model loaded")
        res = sess.run([tf.nn.softmax(p), v], feed_dict={x: board, is_training: itflag})
        # res = sess.run([tf.nn.softmax(p),v], feed_dict={x:fix_board["boards"][300].reshape(-1, 19, 19, 17), is_training:False})
        # res = sess.run([tf.nn.softmax(p),v], feed_dict={x:fix_board["boards"][50].reshape(-1, 19, 19, 17), is_training:True})
        # print(np.argmax(res[0]))
        print(res)
        print(np.argmax(res[0]))
        if data is not None:   # reference data only exists on the demo path above
            print(data["p"][0])
            print(np.argmax(data["p"][0]))
        # print(res[0].tolist()[0])
        # print(np.argmax(res[0]))
    return res


if __name__ == '__main__':
    # train()
    # if sys.argv[1] == "test":
    forward(None)
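A minimal sketch of how a caller might decode what forward() returns: res[0] is the softmax over the 362 policy outputs and res[1] is the tanh value head. The move ordering assumed below (row-major over the 19x19 board, with the last entry treated as pass) is an assumption for illustration, not something this commit specifies.

def decode_move(res):
    # res[0]: policy, shape (1, 362); res[1]: value, shape (1, 1)
    policy = res[0][0]
    value = float(res[1][0][0])
    idx = int(np.argmax(policy))
    if idx == 361:
        move = "pass"                 # assumed: last policy entry encodes pass
    else:
        move = (idx // 19, idx % 19)  # assumed: remaining entries are row-major board points
    return move, value

# usage: move, value = decode_move(forward(None))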
tianshou/core/global_config.json (Normal file, 5 lines)
@ -0,0 +1,5 @@
{
    "global_description": "read by Environment, Neural Network, and MCTS",
    "state_space": " ",
    "action_space": " "
}
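A minimal sketch of how the shared config might be consumed, assuming the Environment, Neural Network, and MCTS read it with the standard json module; the values are still placeholders in this commit, so the snippet only shows the plumbing.

import json

with open("tianshou/core/global_config.json") as f:
    global_config = json.load(f)

state_space = global_config["state_space"]    # placeholder value in this commit
action_space = global_config["action_space"]  # placeholder value in this commit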
tianshou/core/mcts.py (Normal file, 73 lines)
@ -0,0 +1,73 @@
import numpy as np
import math
import json

js = json.load(open("state_mask.json"))
action_num = 2
c_puct = 5.


class MCTSNode:
    def __init__(self, parent, action, state, action_num, prior):
        self.parent = parent
        self.action = action
        self.children = {}
        self.state = state
        self.action_num = action_num
        self.prior = prior

    def select_leaf(self):
        raise NotImplementedError("Need to implement function select_leaf")

    def backup_value(self, action, value):
        raise NotImplementedError("Need to implement function backup_value")

    def expand(self, action):
        raise NotImplementedError("Need to implement function expand")

    def iteration(self):
        raise NotImplementedError("Need to implement function iteration")


class UCTNode(MCTSNode):
    def __init__(self, parent, action, state, action_num, prior):
        super(UCTNode, self).__init__(parent, action, state, action_num, prior)
        self.Q = np.zeros([action_num])  # mean value per action
        self.W = np.zeros([action_num])  # cumulative backed-up value per action
        self.N = np.zeros([action_num])  # visit count per action
        self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1)

    def select_leaf(self):
        action = np.argmax(self.ucb)
        if action in self.children.keys():
            self.children[action].select_leaf()
        else:
            # TODO: apply the action and evaluate the next state
            # state, value = self.env.step_forward(self.state, action)
            # self.children[action] = MCTSNode(self.env, self, action, state, prior)
            # self.backup_value(action, value)
            state, value = self.expand(action)
            # reuse the parent's prior until a policy network provides one
            self.children[action] = UCTNode(self, action, state, self.action_num, self.prior)
            self.backup_value(action, value)

    def backup_value(self, action, value):
        self.N[action] += 1
        self.W[action] += value
        self.Q[action] = self.W[action] / self.N[action]
        self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1)
        if self.parent is not None:
            # propagate along the edge that leads from the parent to this node
            self.parent.backup_value(self.action, value)


class TSNode(MCTSNode):
    def __init__(self, parent, action, state, action_num, prior, method="Gaussian"):
        super(TSNode, self).__init__(parent, action, state, action_num, prior)
        if method == "Beta":
            self.alpha = np.ones([action_num])
            self.beta = np.ones([action_num])
        if method == "Gaussian":
            self.mu = np.zeros([action_num])
            self.sigma = np.zeros([action_num])


class ActionNode:
    def __init__(self, parent, action):
        self.parent = parent
        self.action = action
        self.children = {}
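UCTNode.expand() is still abstract, and the TODO in select_leaf() spells out the intended loop: apply the action, evaluate the next state, attach a child, and back the value up. A minimal sketch of one way to close that loop, assuming an environment object with a step_forward(state, action) -> (next_state, value) method as in the commented-out lines; the EnvUCTNode name and the uniform prior are illustrative, not part of this commit.

class EnvUCTNode(UCTNode):
    def __init__(self, env, parent, action, state, action_num, prior):
        super(EnvUCTNode, self).__init__(parent, action, state, action_num, prior)
        self.env = env

    def expand(self, action):
        # apply the action and evaluate the next state
        return self.env.step_forward(self.state, action)

    def select_leaf(self):
        action = np.argmax(self.ucb)
        if action in self.children:
            self.children[action].select_leaf()
        else:
            state, value = self.expand(action)
            self.children[action] = EnvUCTNode(self.env, self, action, state, self.action_num, self.prior)
            self.backup_value(action, value)


def run_uct(env, root_state, num_iterations=100):
    prior = np.ones(action_num) / action_num   # uniform prior; a policy network would supply this
    root = EnvUCTNode(env, None, None, root_state, action_num, prior)
    for _ in range(num_iterations):
        root.select_leaf()
    return int(np.argmax(root.N))              # most visited action at the root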
tianshou/core/mcts_test.py (Normal file, 25 lines)
@ -0,0 +1,25 @@
import numpy as np


class TestEnv:
    def __init__(self, max_step=5):
        self.step = 0
        self.state = 0
        self.max_step = max_step
        self.reward = {i: np.random.uniform() for i in range(2 ** max_step)}
        self.best = max(self.reward.items(), key=lambda x: x[1])
        print("The best arm is {} with expected reward {}".format(self.best[0], self.best[1]))

    def step_forward(self, action):
        print("Operate action {} at timestep {}".format(action, self.step))
        self.state = self.state + 2 ** self.step * action
        self.step = self.step + 1
        if self.step == self.max_step:
            # Bernoulli draw with success probability self.reward[self.state]
            reward = int(np.random.uniform() < self.reward[self.state])
            print("Get reward {}".format(reward))
        else:
            reward = 0
        return [self.state, reward]


if __name__ == "__main__":
    env = TestEnv(1)
    env.step_forward(1)
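A minimal sketch of a brute-force check against this environment: replay every arm's bit pattern many times and compare the empirical winner with env.best. TestEnv has no reset() yet, so resetting step and state by hand is an assumption about how it is meant to be reused, and the helper name is illustrative.

def evaluate_arms(env, rollouts=200):
    totals = {state: 0.0 for state in env.reward}
    for state in env.reward:
        for _ in range(rollouts):
            env.step = 0    # manual reset, since the class has no reset() yet
            env.state = 0
            for t in range(env.max_step):
                bit = (state >> t) & 1            # replay the bit pattern encoding this arm
                _, reward = env.step_forward(bit)
            totals[state] += reward
    return max(((s, totals[s] / rollouts) for s in totals), key=lambda kv: kv[1])

# usage: env = TestEnv(3); print(evaluate_arms(env))   # noisy: step_forward prints every move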
tianshou/core/policy_value.json (Normal file, 0 lines)
tianshou/core/state_mask.json (Normal file, 4 lines)
@ -0,0 +1,4 @@
{
    "state": "10",
    "mask": "1000"
}