diff --git a/tianshou/core/mcts/evaluator.py b/tianshou/core/mcts/evaluator.py
index 85041be..aacdbe4 100644
--- a/tianshou/core/mcts/evaluator.py
+++ b/tianshou/core/mcts/evaluator.py
@@ -1,5 +1,6 @@
 import numpy as np
 
+
 class evaluator(object):
     def __init__(self, env, action_num):
         self.env = env
@@ -8,6 +9,7 @@ class evaluator(object):
     def __call__(self, state):
         raise NotImplementedError("Need to implement the evaluator")
 
+
 class rollout_policy(evaluator):
     def __init__(self, env, action_num):
         super(rollout_policy, self).__init__(env, action_num)
@@ -15,6 +17,12 @@ class rollout_policy(evaluator):
 
     def __call__(self, state):
        # TODO: prior for rollout policy
-        while not self.is_terminated:
-            action = np.random.randint(0,self.action_num)
-            state, is_terminated = self.env.step_forward(state, action)
\ No newline at end of file
+        # roll out with uniformly random actions until the simulator returns a None (terminal) state
+        total_reward = 0
+        action = np.random.randint(0, self.action_num)
+        state, reward = self.env.step_forward(state, action)
+        while state is not None:
+            total_reward += reward
+            action = np.random.randint(0, self.action_num)
+            state, reward = self.env.step_forward(state, action)
+        return total_reward
diff --git a/tianshou/core/mcts/mcts.py b/tianshou/core/mcts/mcts.py
index 521b455..1bdc0ff 100644
--- a/tianshou/core/mcts/mcts.py
+++ b/tianshou/core/mcts/mcts.py
@@ -2,7 +2,7 @@ import numpy as np
 import math
 import time
 
-c_puct = 1
+c_puct = 5
 
 
 class MCTSNode(object):
@@ -14,15 +14,12 @@ class MCTSNode(object):
         self.action_num = action_num
         self.prior = prior
 
-    def selection(self):
+    def selection(self, simulator):
         raise NotImplementedError("Need to implement function selection")
 
-    def backpropagation(self, action, value):
+    def backpropagation(self, action):
         raise NotImplementedError("Need to implement function backpropagation")
 
-    def expansion(self, simulator, action):
-        raise NotImplementedError("Need to implement function expansion")
-
     def simulation(self, state, evaluator):
         raise NotImplementedError("Need to implement function simulation")
 
@@ -34,40 +31,24 @@ class UCTNode(MCTSNode):
         self.W = np.zeros([action_num])
         self.N = np.zeros([action_num])
         self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1)
-        self.is_terminated = False
 
-    def selection(self):
-        if not self.is_terminated:
-            action = np.argmax(self.ucb)
-            if action in self.children.keys():
-                node, action = self.children[action].selection()
-            else:
-                node = self
+    def selection(self, simulator):
+        action = np.argmax(self.ucb)
+        if action in self.children.keys():
+            return self.children[action].selection(simulator)
         else:
-            action = None
-            node = self
-        return node, action
+            self.children[action] = ActionNode(self, action)
+            return self.children[action].selection(simulator)
 
-    def backpropagation(self, action, value):
-        if action is None:
-            if self.parent is not None:
-                self.parent.backpropagation(self.action, value)
-        else:
-            self.N[action] += 1
-            self.W[action] += value
-            for i in range(self.action_num):
-                if self.N[i] != 0:
-                    self.Q[i] = (self.W[i] + 0.)/self.N[i]
-            self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1.)
-            if self.parent is not None:
-                self.parent.backpropagation(self.action, value)
-
-    def expansion(self, simulator, action):
-        next_state, is_terminated = simulator.step_forward(self.state, action)
-        # TODO: Let users/evaluator give the prior
-        prior = np.ones([self.action_num]) / self.action_num
-        self.children[action] = UCTNode(self, action, next_state, self.action_num, prior)
-        self.children[action].is_terminated = is_terminated
+    def backpropagation(self, action):
+        self.N[action] += 1
+        self.W[action] += self.children[action].reward
+        for i in range(self.action_num):
+            if self.N[i] != 0:
+                self.Q[i] = (self.W[i] + 0.) / self.N[i]
+        self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1.)
+        if self.parent is not None:
+            self.parent.children[self.action].backpropagation(self.children[action].reward)
 
     def simulation(self, evaluator, state):
         value = evaluator(state)
@@ -90,13 +71,38 @@ class ActionNode:
         self.parent = parent
         self.action = action
         self.children = {}
-        self.value = {}
+        self.next_state = None
+        self.reward = 0
+
+    def selection(self, simulator):
+        self.next_state, self.reward = simulator.step_forward(self.parent.state, self.action)
+        if self.next_state is not None:
+            if self.next_state in self.children.keys():
+                return self.children[self.next_state].selection(simulator)
+            else:
+                return self.parent, self.action
+        else:
+            return self.parent, self.action
+
+    def expansion(self, action_num):
+        # TODO: Let users/evaluator give the prior
+        if self.next_state is not None:
+            prior = np.ones([action_num]) / action_num
+            self.children[self.next_state] = UCTNode(self.parent, self.action, self.next_state, action_num, prior)
+            return True
+        else:
+            return False
+
+    def backpropagation(self, value):
+        self.reward += value
+        self.parent.backpropagation(self.action)
 
 
 class MCTS:
     def __init__(self, simulator, evaluator, root, action_num, prior, method="UCT", max_step=None, max_time=None):
         self.simulator = simulator
         self.evaluator = evaluator
+        self.action_num = action_num
         if method == "UCT":
             self.root = UCTNode(None, None, root, action_num, prior)
         if method == "TS":
@@ -111,21 +117,25 @@ class MCTS:
             raise ValueError("Need a stop criteria!")
         while (max_step is not None and self.step < self.max_step or max_step is None) \
                 and (max_time is not None and time.time() - self.start_time < self.max_time or max_time is None):
-            print(self.root.Q)
+            print("Q={}".format(self.root.Q))
+            print("N={}".format(self.root.N))
+            print("W={}".format(self.root.W))
+            print("UCB={}".format(self.root.ucb))
+            print("\n")
             self.expand()
             if max_step is not None:
                 self.step += 1
 
     def expand(self):
-        node, new_action = self.root.selection()
-        if new_action is None:
-            value = node.simulation(self.evaluator, node.state)
-            node.backpropagation(new_action, value)
+        node, new_action = self.root.selection(self.simulator)
+        success = node.children[new_action].expansion(self.action_num)
+        if success:
+            value = node.simulation(self.evaluator, node.children[new_action].next_state)
+            node.children[new_action].backpropagation(value + 0.)
         else:
-            node.expansion(self.simulator, new_action)
-            value = node.simulation(self.evaluator, node.children[new_action].state)
-            node.backpropagation(new_action, value)
+            value = node.simulation(self.evaluator, node.state)
+            node.parent.children[node.action].backpropagation(value + 0.)
 
 
-if __name__=="__main__":
-    pass
\ No newline at end of file
+if __name__ == "__main__":
+    pass
diff --git a/tianshou/core/mcts/mcts_test.py b/tianshou/core/mcts/mcts_test.py
index f708b39..a0425b8 100644
--- a/tianshou/core/mcts/mcts_test.py
+++ b/tianshou/core/mcts/mcts_test.py
@@ -1,34 +1,39 @@
 import numpy as np
 from mcts import MCTS
-import matplotlib.pyplot as plt
+from evaluator import rollout_policy
+
 
 class TestEnv:
     def __init__(self, max_step=5):
         self.max_step = max_step
-        self.reward = {i:np.random.uniform() for i in range(2**max_step)}
+        self.reward = {i: np.random.uniform() for i in range(2 ** max_step)}
         # self.reward = {0:0.8, 1:0.2, 2:0.4, 3:0.6}
-        self.best = max(self.reward.items(), key=lambda x:x[1])
+        self.best = max(self.reward.items(), key=lambda x: x[1])
         # print("The best arm is {} with expected reward {}".format(self.best[0],self.best[1]))
         print(self.reward)
 
     def step_forward(self, state, action):
         if action != 0 and action != 1:
             raise ValueError("Action must be 0 or 1! Your action is {}".format(action))
-        if state[0] >= 2**state[1] or state[1] >= self.max_step:
+        if state[0] >= 2 ** state[1] or state[1] > self.max_step:
             raise ValueError("Invalid State! Your state is {}".format(state))
         # print("Operate action {} at state {}, timestep {}".format(action, state[0], state[1]))
-        new_state = [0,0]
-        new_state[0] = state[0] + 2**state[1]*action
-        new_state[1] = state[1] + 1
-        if new_state[1] == self.max_step:
-            reward = int(np.random.uniform() < self.reward[state[0]])
-            is_terminated = True
-        else:
+        if state[1] == self.max_step:
+            new_state = None
             reward = 0
-            is_terminated = False
-        return new_state, reward, is_terminated
+        else:
+            num = state[0] + 2 ** state[1] * action
+            step = state[1] + 1
+            new_state = (num, step)
+            if step == self.max_step:
+                reward = int(np.random.uniform() < self.reward[num])
+            else:
+                reward = 0
+        return new_state, reward
 
-if __name__=="__main__":
-    env = TestEnv(3)
-    evaluator = lambda state: env.step_forward(state, action)
-    mcts = MCTS(env, evaluator, [0,0], 2, np.array([0.5,0.5]), max_step=1e4)
+
+if __name__ == "__main__":
+    env = TestEnv(1)
+    rollout = rollout_policy(env, 2)
+    evaluator = lambda state: rollout(state)
+    mcts = MCTS(env, evaluator, [0, 0], 2, np.array([0.5, 0.5]), max_step=1e4)
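
Usage note (not part of the patch): a minimal sketch of how the pieces above fit together, assuming the three modules are importable from the same directory. The search loop runs inside MCTS.__init__; the depth-1 TestEnv, the max_step=100 budget, and reading the preferred arm from the root visit counts are illustrative choices only, since the patch itself does not add a best-action accessor.

    import numpy as np
    from evaluator import rollout_policy
    from mcts import MCTS
    from mcts_test import TestEnv

    env = TestEnv(1)                     # two arms: reward[0] and reward[1]
    rollout = rollout_policy(env, 2)     # random rollout used as the leaf evaluator
    mcts = MCTS(env, rollout, [0, 0], 2, np.array([0.5, 0.5]), max_step=100)
    # the search loop runs in MCTS.__init__; afterwards the root visit counts
    # indicate which arm the search prefers
    print("most visited root action:", int(np.argmax(mcts.root.N)))

Passing rollout directly is equivalent to the lambda wrapper in mcts_test.py, since rollout_policy instances are callable on a state.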