mcts update
parent 767fd4ea20
commit c5c2cdf0f3
tianshou/__init__.py (0 lines)
tianshou/core/__init__.py (0 lines)
tianshou/core/mcts/__init__.py (0 lines)
@@ -17,7 +17,7 @@ class MCTSNode(object):
     def selection(self):
         raise NotImplementedError("Need to implement function selection")

-    def backpropagation(self, action, value):
+    def backpropagation(self, action, value, is_terminated):
         raise NotImplementedError("Need to implement function backpropagation")

     def expansion(self, simulator, action):
@@ -34,20 +34,28 @@ class UCTNode(MCTSNode):
         self.W = np.zeros([action_num])
         self.N = np.zeros([action_num])
         self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1)
+        self.is_terminated = False

     def selection(self):
-        action = np.argmax(self.ucb)
+        if self.is_terminated:
+            action = None
+        else:
+            action = np.argmax(self.ucb)
         if action in self.children.keys():
             return self.children[action].selection()
         else:
             return self, action

-    def backpropagation(self, action, value):
+    def backpropagation(self, action, value, is_terminated):
+        self.is_terminated = is_terminated
         self.N[action] += 1
-        self.W[action] += 1
-        self.Q = self.W / self.N
+        self.W[action] += value
+        for i in range(self.action_num):
+            if self.N[i] != 0:
+                self.Q[i] = (self.W[i] + 0.) / self.N[i]
         self.ucb = self.Q + c_puct * self.prior * math.sqrt(np.sum(self.N)) / (self.N + 1)
-        self.parent.backup_value(self.parent.action, value)
+        if self.parent is not None:
+            self.parent.backpropagation(self.parent.action, value)

     def expansion(self, simulator, action):
         next_state = simulator.step_forward(self.state, action)
@@ -56,8 +64,8 @@ class UCTNode(MCTSNode):
         self.children[action] = UCTNode(self, action, next_state, self.action_num, prior)

     def simulation(self, evaluator, state):
-        value = evaluator(state)
-        return value
+        value, is_terminated = evaluator(state)
+        return value, is_terminated


 class TSNode(MCTSNode):
@@ -98,12 +106,23 @@ class MCTS:
         while (max_step is not None and self.step < self.max_step or max_step is None) \
                 and (max_time is not None and time.time() - self.start_time < self.max_time or max_time is None):
             self.expand()
             if max_step is not None:
                 self.step += 1

     def expand(self):
+        print(self.root.Q)
+        print(self.root.N)
+        print(self.root.W)
         node, new_action = self.root.selection()
-        node.expansion(self.simulator, new_action)
-        value = node.simulation(self.evaluator, node.children[new_action].state)
-        node.backpropagation(new_action, value)
-        print(node.state, new_action)
+        if new_action is None:
+            value, is_terminated = node.simulation(self.evaluator, node.state)
+            node.backpropagation(node.action, value, is_terminated)
+            print(value)
+        else:
+            node.expansion(self.simulator, new_action)
+            value, is_terminated = node.simulation(self.evaluator, node.children[new_action].state)
+            node.backpropagation(new_action, value, is_terminated)


 if __name__=="__main__":
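For reference, the UCTNode changes above make backpropagation bump the visit counts N and accumulated values W, recompute the mean values Q only over visited actions, and then refresh a PUCT-style upper confidence bound. Below is a minimal standalone sketch of that update using the array names from the diff; the numeric value of c_puct is an assumption, since the diff only references the constant without showing its definition.

import numpy as np

c_puct = 5.0  # assumed value; the diff only uses the constant, it does not define it

def backprop_update(W, N, Q, prior, action, value):
    # One backpropagation step in the spirit of the updated UCTNode.backpropagation:
    # bump the visit count and accumulated value of the chosen action, then
    # recompute the mean value Q only for actions that have been visited.
    N[action] += 1
    W[action] += value
    visited = N != 0
    Q[visited] = W[visited] / N[visited]
    # PUCT-style bound: exploitation term Q plus an exploration bonus that grows
    # with the total visit count and shrinks for frequently tried actions.
    return Q + c_puct * prior * np.sqrt(np.sum(N)) / (N + 1)

# tiny example with two actions and a uniform prior
W, N, Q = np.zeros(2), np.zeros(2), np.zeros(2)
prior = np.ones(2) / 2
print(backprop_update(W, N, Q, prior, action=0, value=1.0))  # untried action 1 keeps the larger exploration bonus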
@@ -1,5 +1,6 @@
 import numpy as np
 from mcts import MCTS
+import matplotlib.pyplot as plt

 class TestEnv:
     def __init__(self, max_step=5):
@@ -13,16 +14,21 @@ class TestEnv:
             raise ValueError("Action must be 0 or 1!")
         if state[0] >= 2**state[1] or state[1] >= self.max_step:
             raise ValueError("Invalid State!")
-        print("Operate action {} at state {}, timestep {}".format(action, state[0], state[1]))
+        # print("Operate action {} at state {}, timestep {}".format(action, state[0], state[1]))
         state[0] = state[0] + 2**state[1]*action
         state[1] = state[1] + 1
         return state

     def evaluator(self, state):
         if state[1] == self.max_step:
             reward = int(np.random.uniform() > self.reward[state[0]])
             print("Get reward {}".format(reward))
+            is_terminated = True
         else:
             reward = 0
-        return [state, reward]
+            is_terminated = False
+        return reward, is_terminated

 if __name__=="__main__":
     env = TestEnv(1)
     env.step_forward([0,0],1)
     evaluator = lambda state: env.evaluator(state)
     mcts = MCTS(env, evaluator, [0,0], 2, np.ones([2])/2, max_step=1e4)
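With this commit the evaluator contract also changes: it must return a (value, is_terminated) pair, which UCTNode.simulation unpacks and backpropagation uses to mark terminal nodes. Below is a minimal sketch of an evaluator satisfying the new contract, modeled on TestEnv.evaluator above; the thresholds tuple stands in for the self.reward field, whose initialization is not shown in this diff.

import numpy as np

def toy_evaluator(state, max_step=1, thresholds=(0.8, 0.2)):
    # state is [encoded action history, timestep] as in TestEnv; at the final
    # timestep a Bernoulli reward is drawn against a per-state threshold,
    # otherwise the rollout continues with zero reward and is not terminated.
    if state[1] == max_step:
        reward = int(np.random.uniform() > thresholds[state[0]])
        return reward, True
    return 0, False

print(toy_evaluator([1, 1]))  # terminal step: (0 or 1, True)
print(toy_evaluator([0, 0]))  # non-terminal step: (0, False)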