diff --git a/tianshou/core/mcts/evaluator.py b/tianshou/core/mcts/evaluator.py
new file mode 100644
index 0000000..85041be
--- /dev/null
+++ b/tianshou/core/mcts/evaluator.py
@@ -0,0 +1,26 @@
+import numpy as np
+
+class evaluator(object):
+    def __init__(self, env, action_num):
+        self.env = env
+        self.action_num = action_num
+
+    def __call__(self, state):
+        raise NotImplementedError("Need to implement the evaluator")
+
+class rollout_policy(evaluator):
+    def __init__(self, env, action_num):
+        super(rollout_policy, self).__init__(env, action_num)
+
+    def __call__(self, state):
+        # TODO: prior for rollout policy
+        # roll out with uniformly random actions until the episode terminates;
+        # the accumulated reward is the value of `state`
+        total_reward = 0.
+        is_terminated = False
+        while not is_terminated:
+            action = np.random.randint(0, self.action_num)
+            # step_forward returns (new_state, reward, is_terminated)
+            state, reward, is_terminated = self.env.step_forward(state, action)
+            total_reward += reward
+        return total_reward
\ No newline at end of file
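Note on the new file: `evaluator` is the abstract interface (a callable mapping a leaf state to a scalar value) and `rollout_policy` is its first concrete subclass. As an illustration of the interface, here is a minimal sketch of another subclass; it is not part of the patch, and `heuristic_evaluator` and `value_fn` are hypothetical names:

    from evaluator import evaluator  # assumes the module is importable

    class heuristic_evaluator(evaluator):
        # score the leaf state directly with a user-supplied function,
        # instead of sampling a rollout through the environment
        def __init__(self, env, action_num, value_fn):
            super(heuristic_evaluator, self).__init__(env, action_num)
            self.value_fn = value_fn  # state -> scalar value estimate

        def __call__(self, state):
            return self.value_fn(state)

    # e.g. an uninformative evaluator that values every state at 0.5:
    # ev = heuristic_evaluator(env, 2, lambda state: 0.5)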
diff --git a/tianshou/core/mcts/mcts.py b/tianshou/core/mcts/mcts.py
index 16b0ea0..521b455 100644
--- a/tianshou/core/mcts/mcts.py
+++ b/tianshou/core/mcts/mcts.py
@@ -2,7 +2,7 @@ import numpy as np
 import math
 import time
 
-c_puct = 5.
+c_puct = 1
 
 class MCTSNode(object):
@@ -17,7 +17,7 @@ class MCTSNode(object):
     def selection(self):
         raise NotImplementedError("Need to implement function selection")
 
-    def backpropagation(self, action, value, is_terminated):
+    def backpropagation(self, action, value):
         raise NotImplementedError("Need to implement function backpropagation")
 
     def expansion(self, simulator, action):
@@ -37,35 +37,45 @@ class UCTNode(MCTSNode):
         self.is_terminated = False
 
     def selection(self):
-        if self.is_terminated:
-            action = None
-        else:
+        if not self.is_terminated:
             action = np.argmax(self.ucb)
-        if action in self.children.keys():
-            self.children[action].selection()
+            if action in self.children.keys():
+                node, action = self.children[action].selection()
+            else:
+                node = self
         else:
-            return self, action
+            action = None
+            node = self
+        return node, action
 
-    def backpropagation(self, action, value, is_terminated):
-        self.is_terminated = is_terminated
-        self.N[action] += 1
-        self.W[action] += value
-        for i in range(self.action_num):
-            if self.N[i] != 0:
-                self.Q[i] = (self.W[i] + 0.)/self.N[i]
-        self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1)
-        if self.parent is not None:
-            self.parent.backpropagation(self.parent.action, value)
+    def backpropagation(self, action, value):
+        if action is None:
+            # terminal node reached by selection: no stats to update here,
+            # just pass the value up through the action that led to this node
+            if self.parent is not None:
+                self.parent.backpropagation(self.action, value)
+        else:
+            self.N[action] += 1
+            self.W[action] += value
+            for i in range(self.action_num):
+                if self.N[i] != 0:
+                    self.Q[i] = (self.W[i] + 0.)/self.N[i]
+            # PUCT-style rule: Q plus prior-weighted exploration bonus
+            self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1.)
+            if self.parent is not None:
+                self.parent.backpropagation(self.action, value)
 
     def expansion(self, simulator, action):
-        next_state = simulator.step_forward(self.state, action)
+        # step_forward now returns (next_state, reward, is_terminated)
+        next_state, reward, is_terminated = simulator.step_forward(self.state, action)
         # TODO: Let users/evaluator give the prior
         prior = np.ones([self.action_num]) / self.action_num
         self.children[action] = UCTNode(self, action, next_state, self.action_num, prior)
+        self.children[action].is_terminated = is_terminated
+        self.children[action].reward = reward
 
     def simulation(self, evaluator, state):
-        value, is_ternimated = evaluator(state)
-        return value, is_ternimated
+        value = evaluator(state)
+        return value
 
 class TSNode(MCTSNode):
@@ -105,24 +111,25 @@ class MCTS:
             raise ValueError("Need a stop criteria!")
         while (max_step is not None and self.step < self.max_step or max_step is None) \
                 and (max_time is not None and time.time() - self.start_time < self.max_time or max_time is None):
+            print(self.root.Q)
             self.expand()
             if max_step is not None:
                 self.step += 1
 
     def expand(self):
-        print(self.root.Q)
-        print(self.root.N)
-        print(self.root.W)
         node, new_action = self.root.selection()
-        print(node.state, new_action)
         if new_action is None:
-            value, is_terminated = node.simulation(self.evaluator, node.state)
-            node.backpropagation(node.action, value, is_terminated)
-            print(value)
+            # selection stopped at a terminal node; rolling out from a terminal
+            # state is invalid, so reuse the reward recorded at expansion time
+            node.backpropagation(new_action, node.reward)
         else:
             node.expansion(self.simulator, new_action)
-            value, is_terminated = node.simulation(self.evaluator, node.children[new_action].state)
-            node.backpropagation(new_action, value, is_terminated)
+            child = node.children[new_action]
+            if child.is_terminated:
+                value = child.reward  # terminal child: reward already known
+            else:
+                value = node.simulation(self.evaluator, child.state)
+            node.backpropagation(new_action, value)
 
 if __name__=="__main__":
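To make the selection rule concrete, here is a worked instance of the UCB update from `backpropagation` above, as a standalone sketch with made-up visit counts (same formula, `c_puct = 1`):

    import math
    import numpy as np

    c_puct = 1
    prior = np.array([0.5, 0.5])
    N = np.array([3, 1])    # per-action visit counts
    W = np.array([2., 1.])  # per-action accumulated values
    Q = W / N               # mean values: [0.667, 1.0]
    ucb = Q + c_puct * prior * math.sqrt(np.sum(N)) / (N + 1.)
    # bonus = [0.5*2/4, 0.5*2/2] = [0.25, 0.5], so ucb = [0.917, 1.5];
    # selection() would pick np.argmax(ucb) == 1, the action that is
    # both higher-valued and less visited
    print(ucb)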
diff --git a/tianshou/core/mcts/mcts_test.py b/tianshou/core/mcts/mcts_test.py
index 4c86736..f708b39 100644
--- a/tianshou/core/mcts/mcts_test.py
+++ b/tianshou/core/mcts/mcts_test.py
@@ -6,29 +6,33 @@ class TestEnv:
     def __init__(self, max_step=5):
         self.max_step = max_step
         self.reward = {i:np.random.uniform() for i in range(2**max_step)}
+        # self.reward = {0:0.8, 1:0.2, 2:0.4, 3:0.6}
         self.best = max(self.reward.items(), key=lambda x:x[1])
-        print("The best arm is {} with expected reward {}".format(self.best[0],self.best[1]))
+        # print("The best arm is {} with expected reward {}".format(self.best[0],self.best[1]))
+        print(self.reward)
 
     def step_forward(self, state, action):
         if action != 0 and action != 1:
-            raise ValueError("Action must be 0 or 1!")
+            raise ValueError("Action must be 0 or 1! Your action is {}".format(action))
         if state[0] >= 2**state[1] or state[1] >= self.max_step:
-            raise ValueError("Invalid State!")
+            raise ValueError("Invalid State! Your state is {}".format(state))
         # print("Operate action {} at state {}, timestep {}".format(action, state[0], state[1]))
-        state[0] = state[0] + 2**state[1]*action
-        state[1] = state[1] + 1
-        return state
-
-    def evaluator(self, state):
-        if state[1] == self.max_step:
-            reward = int(np.random.uniform() > self.reward[state[0]])
+        # build a fresh state so the caller's state list is not mutated
+        new_state = [0, 0]
+        new_state[0] = state[0] + 2**state[1]*action
+        new_state[1] = state[1] + 1
+        if new_state[1] == self.max_step:
+            # Bernoulli reward of the arm actually reached, i.e. new_state[0]
+            reward = int(np.random.uniform() < self.reward[new_state[0]])
             is_terminated = True
         else:
             reward = 0
             is_terminated = False
-        return reward, is_terminated
+        return new_state, reward, is_terminated
 
 if __name__=="__main__":
-    env = TestEnv(1)
-    evaluator = lambda state: env.evaluator(state)
-    mcts = MCTS(env, evaluator, [0,0], 2, np.ones([2])/2, max_step=1e4)
+    env = TestEnv(3)
+    # use the random-rollout evaluator; assumes `from evaluator import rollout_policy`
+    # alongside the existing imports at the top of this file
+    evaluator = rollout_policy(env, 2)
+    mcts = MCTS(env, evaluator, [0,0], 2, np.array([0.5,0.5]), max_step=1e4)
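For reference, the pieces are intended to fit together roughly as below. This is a sketch, not part of the patch: it assumes the three files are importable as modules and, as the test script suggests, that the MCTS constructor runs the search itself.

    import numpy as np
    from mcts import MCTS
    from mcts_test import TestEnv
    from evaluator import rollout_policy

    env = TestEnv(3)                    # 2**3 = 8 Bernoulli arms
    evaluator = rollout_policy(env, 2)  # uniform random rollouts as the leaf evaluator
    mcts = MCTS(env, evaluator, [0, 0], 2, np.array([0.5, 0.5]), max_step=1e4)
    # after the search, the root statistics point at the preferred first action
    print(mcts.root.Q, mcts.root.N)
    print("greedy first action:", np.argmax(mcts.root.N))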