mcts update
commit 31beb46563 (parent 1e07cb1fac)
evaluator.py
@@ -1,5 +1,6 @@
 import numpy as np
 
+
 class evaluator(object):
     def __init__(self, env, action_num):
         self.env = env
@@ -8,6 +9,7 @@ class evaluator(object):
     def __call__(self, state):
         raise NotImplementedError("Need to implement the evaluator")
 
+
 class rollout_policy(evaluator):
     def __init__(self, env, action_num):
         super(rollout_policy, self).__init__(env, action_num)
@@ -15,6 +17,11 @@ class rollout_policy(evaluator):
 
     def __call__(self, state):
         # TODO: prior for rollout policy
-        while not self.is_terminated:
-            action = np.random.randint(0,self.action_num)
-            state, is_terminated = self.env.step_forward(state, action)
+        # play uniformly random actions until the simulator signals
+        # termination by returning state None, accumulating the rewards
+        total_reward = 0
+        while state is not None:
+            action = np.random.randint(0, self.action_num)
+            state, reward = self.env.step_forward(state, action)
+            total_reward += reward
+        return total_reward
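In words: rollout_policy now treats `state is None` as the termination signal from step_forward and returns the total reward of one uniformly random trajectory. A minimal usage sketch against a hypothetical environment that follows the same step_forward contract (ChainEnv is invented for illustration):

import numpy as np

from evaluator import rollout_policy


class ChainEnv:
    """Hypothetical 3-step chain; state is (pos, t), terminal once t == 3."""
    def step_forward(self, state, action):
        pos, t = state
        if t == 3:
            return None, 0               # terminal: signalled by state None
        return (pos + action, t + 1), 1  # reward 1 per step, for illustration


rollout = rollout_policy(ChainEnv(), 2)
print(rollout((0, 0)))  # total reward of one random rollout: always 3 here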
mcts.py
@@ -2,7 +2,7 @@ import numpy as np
 import math
 import time
 
-c_puct = 1
+c_puct = 5
 
 
 class MCTSNode(object):
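Raising c_puct from 1 to 5 strengthens exploration: each node scores its actions with Q + c_puct * prior * sqrt(sum(N)) / (N + 1), so a larger c_puct lets the prior-weighted visit bonus dominate an already-good Q for longer. A quick numeric sketch with made-up statistics:

import numpy as np

c_puct = 5
prior = np.array([0.5, 0.5])
Q = np.array([0.8, 0.0])  # action 0 looks better so far...
N = np.array([9.0, 1.0])  # ...and has been tried far more often

ucb = Q + c_puct * prior * np.sqrt(np.sum(N)) / (N + 1.)
print(ucb)             # approx [1.59, 3.95]
print(np.argmax(ucb))  # 1: the rarely tried action is selected next

With c_puct = 1 the same statistics give approx [0.96, 0.79], and the search would keep exploiting action 0.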
@@ -14,15 +14,12 @@ class MCTSNode(object):
         self.action_num = action_num
         self.prior = prior
 
-    def selection(self):
+    def selection(self, simulator):
         raise NotImplementedError("Need to implement function selection")
 
-    def backpropagation(self, action, value):
+    def backpropagation(self, action):
         raise NotImplementedError("Need to implement function backpropagation")
 
-    def expansion(self, simulator, action):
-        raise NotImplementedError("Need to implement function expansion")
-
     def simulation(self, state, evaluator):
         raise NotImplementedError("Need to implement function simulation")
 
@@ -34,40 +31,24 @@ class UCTNode(MCTSNode):
         self.W = np.zeros([action_num])
         self.N = np.zeros([action_num])
         self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1)
-        self.is_terminated = False
 
-    def selection(self):
-        if not self.is_terminated:
-            action = np.argmax(self.ucb)
-            if action in self.children.keys():
-                node, action = self.children[action].selection()
-            else:
-                node = self
-        else:
-            action = None
-            node = self
-        return node, action
+    def selection(self, simulator):
+        action = np.argmax(self.ucb)
+        if action in self.children.keys():
+            return self.children[action].selection(simulator)
+        else:
+            self.children[action] = ActionNode(self, action)
+            return self.children[action].selection(simulator)
 
-    def backpropagation(self, action, value):
-        if action is None:
-            if self.parent is not None:
-                self.parent.backpropagation(self.action, value)
-        else:
-            self.N[action] += 1
-            self.W[action] += value
-            for i in range(self.action_num):
-                if self.N[i] != 0:
-                    self.Q[i] = (self.W[i] + 0.)/self.N[i]
-            self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1.)
-            if self.parent is not None:
-                self.parent.backpropagation(self.action, value)
+    def backpropagation(self, action):
+        self.N[action] += 1
+        self.W[action] += self.children[action].reward
+        for i in range(self.action_num):
+            if self.N[i] != 0:
+                self.Q[i] = (self.W[i] + 0.) / self.N[i]
+        self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1.)
+        if self.parent is not None:
+            self.parent.backpropagation(self.action)  # recurse with the action index, not a reward
 
-    def expansion(self, simulator, action):
-        next_state, is_terminated = simulator.step_forward(self.state, action)
-        # TODO: Let users/evaluator give the prior
-        prior = np.ones([self.action_num]) / self.action_num
-        self.children[action] = UCTNode(self, action, next_state, self.action_num, prior)
-        self.children[action].is_terminated = is_terminated
-
     def simulation(self, evaluator, state):
         value = evaluator(state)
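The reworked backpropagation collapses the old two-branch version into a single pass: bump N and W for the chosen action, refresh Q[i] = W[i] / N[i] for every visited action, recompute the UCB vector, and recurse toward the root. A standalone sketch of that update rule on plain arrays (illustrative values, no tree):

import math

import numpy as np

c_puct = 5


def update(Q, W, N, prior, action, reward):
    # one node-local backpropagation step, mirroring UCTNode
    N[action] += 1
    W[action] += reward
    visited = N != 0
    Q[visited] = W[visited] / N[visited]
    return Q + c_puct * prior * math.sqrt(np.sum(N)) / (N + 1.)


Q, W, N = np.zeros(2), np.zeros(2), np.zeros(2)
ucb = update(Q, W, N, np.ones(2) / 2, action=0, reward=1.0)
print(Q)    # [1. 0.]
print(ucb)  # [2.25 2.5]: the untried action still wins on exploration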
@@ -90,13 +71,38 @@ class ActionNode:
         self.parent = parent
         self.action = action
         self.children = {}
-        self.value = {}
+        self.next_state = None
+        self.reward = 0
+
+    def selection(self, simulator):
+        self.next_state, self.reward = simulator.step_forward(self.parent.state, self.action)
+        if self.next_state is not None:
+            if self.next_state in self.children.keys():
+                return self.children[self.next_state].selection(simulator)
+            else:
+                return self.parent, self.action
+        else:
+            return self.parent, self.action
+
+    def expansion(self, action_num):
+        # TODO: Let users/evaluator give the prior
+        if self.next_state is not None:
+            prior = np.ones([action_num]) / action_num
+            self.children[self.next_state] = UCTNode(self.parent, self.action, self.next_state, action_num, prior)
+            return True
+        else:
+            return False
+
+    def backpropagation(self, value):
+        self.reward += value
+        self.parent.backpropagation(self.action)
 
 
 class MCTS:
     def __init__(self, simulator, evaluator, root, action_num, prior, method="UCT", max_step=None, max_time=None):
         self.simulator = simulator
         self.evaluator = evaluator
+        self.action_num = action_num
         if method == "UCT":
             self.root = UCTNode(None, None, root, action_num, prior)
         if method == "TS":
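The tree now alternates two node types: UCTNode holds a state and per-action statistics, while ActionNode represents the chosen edge and keys its children by the sampled next state, so a stochastic simulator can fan out into several successor nodes. A minimal selection/expansion/backpropagation cycle against a hypothetical one-step environment (assumes the mcts module above is importable; illustrative only):

import numpy as np

from mcts import UCTNode


class CoinEnv:
    """Hypothetical environment: every episode ends after one move."""
    def step_forward(self, state, action):
        if state[1] == 1:
            return None, 0     # terminal
        return (action, 1), 1  # deterministic reward, for illustration


env = CoinEnv()
root = UCTNode(None, None, (0, 0), 2, np.ones(2) / 2)
node, action = root.selection(env)  # walk to an untried edge
edge = node.children[action]
if edge.expansion(2):               # attach the child UCTNode
    edge.backpropagation(1.0)       # push a simulated value upward
print(root.N, root.Q)               # root statistics after one cycle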
@@ -111,21 +117,25 @@ class MCTS:
             raise ValueError("Need a stop criterion!")
         while (max_step is not None and self.step < self.max_step or max_step is None) \
                 and (max_time is not None and time.time() - self.start_time < self.max_time or max_time is None):
-            print(self.root.Q)
+            print("Q={}".format(self.root.Q))
+            print("N={}".format(self.root.N))
+            print("W={}".format(self.root.W))
+            print("UCB={}".format(self.root.ucb))
+            print("\n")
             self.expand()
             if max_step is not None:
                 self.step += 1
 
     def expand(self):
-        node, new_action = self.root.selection()
-        if new_action is None:
-            value = node.simulation(self.evaluator, node.state)
-            node.backpropagation(new_action, value)
+        node, new_action = self.root.selection(self.simulator)
+        success = node.children[new_action].expansion(self.action_num)
+        if success:
+            value = node.simulation(self.evaluator, node.children[new_action].next_state)
+            node.children[new_action].backpropagation(value + 0.)
         else:
-            node.expansion(self.simulator, new_action)
-            value = node.simulation(self.evaluator, node.children[new_action].state)
-            node.backpropagation(new_action, value)
+            value = node.simulation(self.evaluator, node.state)
+            node.parent.children[node.action].backpropagation(value + 0.)  # credit the edge into node
 
 
 if __name__ == "__main__":
     pass
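The run loop treats max_step and max_time as independent budgets: searching continues while every budget that was actually supplied still has room. The same stopping rule factored into a small helper (a sketch, not part of the commit):

import time


def should_continue(step, start_time, max_step=None, max_time=None):
    # stop as soon as any supplied budget is exhausted
    if max_step is not None and step >= max_step:
        return False
    if max_time is not None and time.time() - start_time >= max_time:
        return False
    return True


step, start = 0, time.time()
while should_continue(step, start, max_step=5):
    step += 1
print(step)  # 5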
test script (filename not shown)
@@ -1,34 +1,39 @@
 import numpy as np
 from mcts import MCTS
-import matplotlib.pyplot as plt
+from evaluator import rollout_policy
 
+
 class TestEnv:
     def __init__(self, max_step=5):
         self.max_step = max_step
-        self.reward = {i:np.random.uniform() for i in range(2**max_step)}
+        self.reward = {i: np.random.uniform() for i in range(2 ** max_step)}
         # self.reward = {0:0.8, 1:0.2, 2:0.4, 3:0.6}
-        self.best = max(self.reward.items(), key=lambda x:x[1])
+        self.best = max(self.reward.items(), key=lambda x: x[1])
         # print("The best arm is {} with expected reward {}".format(self.best[0],self.best[1]))
         print(self.reward)
 
     def step_forward(self, state, action):
         if action != 0 and action != 1:
             raise ValueError("Action must be 0 or 1! Your action is {}".format(action))
-        if state[0] >= 2**state[1] or state[1] >= self.max_step:
+        if state[0] >= 2 ** state[1] or state[1] > self.max_step:
             raise ValueError("Invalid State! Your state is {}".format(state))
         # print("Operate action {} at state {}, timestep {}".format(action, state[0], state[1]))
-        new_state = [0,0]
-        new_state[0] = state[0] + 2**state[1]*action
-        new_state[1] = state[1] + 1
-        if new_state[1] == self.max_step:
-            reward = int(np.random.uniform() < self.reward[state[0]])
-            is_terminated = True
-        else:
-            reward = 0
-            is_terminated = False
-        return new_state, reward, is_terminated
+        if state[1] == self.max_step:
+            new_state = None  # terminal: signal the end of an episode
+            reward = 0
+        else:
+            num = state[0] + 2 ** state[1] * action
+            step = state[1] + 1
+            new_state = (num, step)
+            if step == self.max_step:
+                reward = int(np.random.uniform() < self.reward[num])  # sample the completed arm
+            else:
+                reward = 0
+        return new_state, reward
 
-if __name__=="__main__":
-    env = TestEnv(3)
-    evaluator = lambda state: env.step_forward(state, action)
-    mcts = MCTS(env, evaluator, [0,0], 2, np.array([0.5,0.5]), max_step=1e4)
+
+if __name__ == "__main__":
+    env = TestEnv(1)
+    rollout = rollout_policy(env, 2)
+    evaluator = lambda state: rollout(state)
+    mcts = MCTS(env, evaluator, [0, 0], 2, np.array([0.5, 0.5]), max_step=1e4)
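Assuming the search loop runs during construction, as the test's usage suggests, the root's visit counts summarize the search once MCTS(...) returns. A hypothetical follow-up for the script above, reading off the recommended arm and comparing it with the environment's known best:

best_action = int(np.argmax(mcts.root.N))  # most-visited root action
print("chosen arm: {} (Q={:.3f})".format(best_action, mcts.root.Q[best_action]))
print("true best arm: {}".format(env.best[0]))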