code refactor, try to merge Go and GoEnv

parent 01c0c2483a
commit 31199c7d0d
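Summary of the refactor as it appears in the hunks below: the 8-board window formerly called `past`/`history` becomes `latest_boards`, `data_process` becomes `generate_nn_input`, `is_valid` is split into `knowledge_prunning` plus `simulate_is_valid`, and `GoEnv` is now constructed with the owning `Game` and reads board geometry through it. A minimal sketch of that ownership wiring (class names `MiniGame`/`MiniGoEnv` are illustrative, not the repository's):

from collections import deque

EMPTY = 0  # stand-in for utils.EMPTY

class MiniGoEnv:
    def __init__(self, **kwargs):
        # The simulator no longer keeps its own size/komi; it reads the board
        # geometry from the owning Game object it is constructed with.
        self.game = kwargs['game']
        self.board = [EMPTY] * (self.game.size * self.game.size)
        self.latest_boards = deque(maxlen=8)

    def _flatten(self, vertex):
        x, y = vertex
        return (x - 1) * self.game.size + (y - 1)

class MiniGame:
    def __init__(self, size=9, komi=6.5):
        self.size = size
        self.komi = komi
        self.board = [EMPTY] * (size * size)
        self.latest_boards = deque([self.board] * 8, maxlen=8)
        # The game passes itself to the simulator, mirroring GoEnv(game=self).
        self.simulator = MiniGoEnv(game=self)

if __name__ == "__main__":
    g = MiniGame()
    print(g.simulator._flatten((3, 4)))  # 21 on a 9x9 board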
@@ -31,14 +31,14 @@ class Game:
         self.komi = komi
         self.board = [utils.EMPTY] * (self.size * self.size)
         self.history = []
-        self.past = deque(maxlen=8)
+        self.latest_boards = deque(maxlen=8)
         for _ in range(8):
-            self.past.append(self.board)
+            self.latest_boards.append(self.board)

         self.executor = go.Go(game=self)
         #self.strategy = strategy(checkpoint_path)

-        self.simulator = strategy.GoEnv()
+        self.simulator = strategy.GoEnv(game=self)
         self.net = network_small.Network()
         self.sess = self.net.forward(checkpoint_path)
         self.evaluator = lambda state: self.sess.run([tf.nn.softmax(self.net.p), self.net.v],
@@ -57,7 +57,7 @@ class Game:
         self.board = [utils.EMPTY] * (self.size * self.size)
         self.history = []
         for _ in range(8):
-            self.past.append(self.board)
+            self.latest_boards.append(self.board)

     def set_size(self, n):
         self.size = n
@@ -66,29 +66,29 @@ class Game:
     def set_komi(self, k):
         self.komi = k

-    def data_process(self, history, color):
-        state = np.zeros([1, self.simulator.size, self.simulator.size, 17])
+    def generate_nn_input(self, history, color):
+        state = np.zeros([1, self.size, self.size, 17])
         for i in range(8):
-            state[0, :, :, i] = np.array(np.array(history[i]) == np.ones(self.simulator.size ** 2)).reshape(self.simulator.size, self.simulator.size)
-            state[0, :, :, i + 8] = np.array(np.array(history[i]) == -np.ones(self.simulator.size ** 2)).reshape(self.simulator.size, self.simulator.size)
+            state[0, :, :, i] = np.array(np.array(history[i]) == np.ones(self.size ** 2)).reshape(self.size, self.size)
+            state[0, :, :, i + 8] = np.array(np.array(history[i]) == -np.ones(self.size ** 2)).reshape(self.size, self.size)
         if color == utils.BLACK:
-            state[0, :, :, 16] = np.ones([self.simulator.size, self.simulator.size])
+            state[0, :, :, 16] = np.ones([self.size, self.size])
         if color == utils.WHITE:
-            state[0, :, :, 16] = np.zeros([self.simulator.size, self.simulator.size])
+            state[0, :, :, 16] = np.zeros([self.size, self.size])
         return state

-    def strategy_gen_move(self, history, color):
-        self.simulator.history = copy.copy(history)
-        self.simulator.board = copy.copy(history[-1])
-        state = self.data_process(self.simulator.history, color)
-        mcts = MCTS(self.simulator, self.evaluator, state, self.simulator.size ** 2 + 1, inverse=True, max_step=10)
+    def strategy_gen_move(self, latest_boards, color):
+        self.simulator.latest_boards = copy.copy(latest_boards)
+        self.simulator.board = copy.copy(latest_boards[-1])
+        nn_input = self.generate_nn_input(self.simulator.latest_boards, color)
+        mcts = MCTS(self.simulator, self.evaluator, nn_input, self.size ** 2 + 1, inverse=True, max_step=1)
         temp = 1
         prob = mcts.root.N ** temp / np.sum(mcts.root.N ** temp)
-        choice = np.random.choice(self.simulator.size ** 2 + 1, 1, p=prob).tolist()[0]
-        if choice == self.simulator.size ** 2:
+        choice = np.random.choice(self.size ** 2 + 1, 1, p=prob).tolist()[0]
+        if choice == self.size ** 2:
             move = utils.PASS
         else:
-            move = (choice % self.simulator.size + 1, choice / self.simulator.size + 1)
+            move = (choice % self.size + 1, choice / self.size + 1)
         return move, prob

     def do_move(self, color, vertex):
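For reference, a self-contained sketch of what the renamed `generate_nn_input` computes: planes 0-7 hold the black stones of the last 8 boards, planes 8-15 the white stones, and plane 16 encodes the colour to move. The helper name and constants here are stand-ins; only the plane layout follows the diff:

import numpy as np

BLACK, WHITE = +1, -1  # stand-ins for utils.BLACK / utils.WHITE

def nn_input_sketch(latest_boards, color, size=9):
    """latest_boards: 8 flat boards (lists of +1 / -1 / 0), oldest first."""
    state = np.zeros([1, size, size, 17])
    for i, board in enumerate(latest_boards):
        flat = np.array(board)
        state[0, :, :, i] = (flat == BLACK).reshape(size, size)      # black stones at step i
        state[0, :, :, i + 8] = (flat == WHITE).reshape(size, size)  # white stones at step i
    # plane 16: all ones if black is to move, all zeros if white is to move
    state[0, :, :, 16] = np.ones([size, size]) if color == BLACK else np.zeros([size, size])
    return state

# Eight empty boards with black to move: only the colour plane is non-zero.
assert nn_input_sketch([[0] * 81] * 8, BLACK).sum() == 81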
@@ -100,7 +100,7 @@ class Game:
     def gen_move(self, color):
         # move = self.strategy.gen_move(color)
         # return move
-        move, self.prob = self.strategy_gen_move(self.past, color)
+        move, self.prob = self.strategy_gen_move(self.latest_boards, color)
         self.do_move(color, move)
         return move
@@ -127,3 +127,6 @@ class Game:
 if __name__ == "__main__":
     g = Game()
     g.show_board()
+    #file = open("debug.txt", "a")
+    #file.write("mcts check\n")
+    #file.close()
@@ -135,7 +135,7 @@ class Go:
         self.game.board[self.game._flatten(vertex)] = color
         self._process_board(color, vertex)
         self.game.history.append(copy.copy(self.game.board))
-        self.game.past.append(copy.copy(self.game.board))
+        self.game.latest_boards.append(copy.copy(self.game.board))
         return True

     def _find_empty(self):
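A small illustrative sketch of the double bookkeeping this hunk keeps in `Go.do_move`: `game.history` records every position of the game, while `game.latest_boards` is a bounded deque holding only the 8 most recent positions used as network input (the function name here is hypothetical):

import copy
from collections import deque

def record_position(history, latest_boards, board):
    history.append(copy.copy(board))        # unbounded record of the whole game
    latest_boards.append(copy.copy(board))  # bounded window of the last 8 positions

history, latest_boards = [], deque(maxlen=8)
for move_no in range(10):
    record_position(history, latest_boards, [move_no])
print(len(history), len(latest_boards))  # 10 8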
@@ -14,15 +14,14 @@ DELTA = [[1, 0], [-1, 0], [0, -1], [0, 1]]
 CORNER_OFFSET = [[-1, -1], [-1, 1], [1, 1], [1, -1]]

 class GoEnv:
-    def __init__(self, size=9, komi=6.5):
-        self.size = size
-        self.komi = komi
-        self.board = [utils.EMPTY] * (self.size * self.size)
-        self.history = deque(maxlen=8)
+    def __init__(self, **kwargs):
+        self.game = kwargs['game']
+        self.board = [utils.EMPTY] * (self.game.size * self.game.size)
+        self.latest_boards = deque(maxlen=8)

     def _flatten(self, vertex):
         x, y = vertex
-        return (x - 1) * self.size + (y - 1)
+        return (x - 1) * self.game.size + (y - 1)

     def _bfs(self, vertex, color, block, status, alive_break):
         block.append(vertex)
@@ -35,7 +34,7 @@ class GoEnv:

     def _find_block(self, vertex, alive_break=False):
         block = []
-        status = [False] * (self.size * self.size)
+        status = [False] * (self.game.size * self.game.size)
         color = self.board[self._flatten(vertex)]
         self._bfs(vertex, color, block, status, alive_break)

@@ -73,7 +72,7 @@ class GoEnv:
         _board = copy.copy(self.board)
         self.board[self._flatten(vertex)] = color
         self._process_board(color, vertex)
-        if self.board in self.history:
+        if self.board in self.latest_boards:
             res = True
         else:
             res = False
@@ -83,8 +82,8 @@ class GoEnv:

     def _in_board(self, vertex):
         x, y = vertex
-        if x < 1 or x > self.size: return False
-        if y < 1 or y > self.size: return False
+        if x < 1 or x > self.game.size: return False
+        if y < 1 or y > self.game.size: return False
         return True

     def _neighbor(self, vertex):
@@ -151,21 +150,28 @@ class GoEnv:
             # print "many opponents, fake eye"
             return False

-    # def is_valid(self, color, vertex):
-    def is_valid(self, state, action):
+    def knowledge_prunning(self, color, vertex):
+        ### check if it is an eye of yourself
+        ### assumptions : notice that this judgement requires that the state is an endgame
+        if self._is_eye(color, vertex):
+            return False
+        return True
+
+    def simulate_is_valid(self, state, action):
         # state is the play board, the shape is [1, 9, 9, 17]
-        if action == self.size * self.size:
+        if action == self.game.size * self.game.size:
             vertex = (0, 0)
         else:
-            vertex = (action / self.size + 1, action % self.size + 1)
+            vertex = (action / self.game.size + 1, action % self.game.size + 1)
         if state[0, 0, 0, -1] == utils.BLACK:
             color = utils.BLACK
         else:
             color = utils.WHITE
-        self.history.clear()
+        self.latest_boards.clear()
         for i in range(8):
-            self.history.append((state[:, :, :, i] - state[:, :, :, i + 8]).reshape(-1).tolist())
-        self.board = copy.copy(self.history[-1])
+            self.latest_boards.append((state[:, :, :, i] - state[:, :, :, i + 8]).reshape(-1).tolist())
+        self.board = copy.copy(self.latest_boards[-1])

         ### in board
         if not self._in_board(vertex):
             return False
@@ -180,12 +186,11 @@ class GoEnv:
         if not self._is_qi(color, vertex):
             return False

-        ### check if it is an eye of yourself
-        ### assumptions : notice that this judgement requires that the state is an endgame
-        if self._is_eye(color, vertex):
+        ### forbid global isomorphous
+        if self._check_global_isomorphous(color, vertex):
             return False

-        if self._check_global_isomorphous(color, vertex):
+        if not self.knowledge_prunning(color, vertex):
             return False

         return True
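Taken together, the two hunks above rearrange the validity test into the control flow sketched below; the rule helpers are stubbed out and only the ordering of the checks visible in this diff is reproduced:

class _EnvStub:
    # Permissive stand-ins for the GoEnv rule helpers used below.
    def _in_board(self, vertex): return True
    def _is_qi(self, color, vertex): return True
    def _check_global_isomorphous(self, color, vertex): return False
    def knowledge_prunning(self, color, vertex): return True

def is_valid_sketch(env, color, vertex):
    if not env._in_board(vertex):                     # off the board
        return False
    if not env._is_qi(color, vertex):                 # move would leave the stone with no liberty
        return False
    if env._check_global_isomorphous(color, vertex):  # repeats an earlier whole-board position
        return False
    if not env.knowledge_prunning(color, vertex):     # endgame heuristic: never fill your own eye
        return False
    return True

print(is_valid_sketch(_EnvStub(), +1, (3, 3)))  # True with the permissive stubs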
@@ -206,17 +211,17 @@ class GoEnv:
             color = utils.BLACK
         else:
             color = utils.WHITE
-        if action == self.size ** 2:
+        if action == self.game.size ** 2:
             vertex = utils.PASS
         else:
-            vertex = (action % self.size + 1, action / self.size + 1)
+            vertex = (action % self.game.size + 1, action / self.game.size + 1)
         # print(vertex)
         # print(self.board)
         self.board = (state[:, :, :, 7] - state[:, :, :, 15]).reshape(-1).tolist()
         self.do_move(color, vertex)
         new_state = np.concatenate(
-            [state[:, :, :, 1:8], (np.array(self.board) == utils.BLACK).reshape(1, self.size, self.size, 1),
-             state[:, :, :, 9:16], (np.array(self.board) == utils.WHITE).reshape(1, self.size, self.size, 1),
-             np.array(1 - state[:, :, :, -1]).reshape(1, self.size, self.size, 1)],
+            [state[:, :, :, 1:8], (np.array(self.board) == utils.BLACK).reshape(1, self.game.size, self.game.size, 1),
+             state[:, :, :, 9:16], (np.array(self.board) == utils.WHITE).reshape(1, self.game.size, self.game.size, 1),
+             np.array(1 - state[:, :, :, -1]).reshape(1, self.game.size, self.game.size, 1)],
             axis=3)
         return new_state, 0
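The concatenation in this hunk rolls the 17-plane state forward by one move; a standalone sketch with a hypothetical helper name, assuming the same plane layout as above:

import numpy as np

BLACK, WHITE = +1, -1  # stand-ins for utils.BLACK / utils.WHITE

def roll_state_sketch(state, board, size=9):
    """state: [1, size, size, 17] planes before the move; board: flat board after it."""
    board = np.array(board)
    return np.concatenate(
        [state[:, :, :, 1:8],                                          # shift black history by one
         (board == BLACK).reshape(1, size, size, 1),                   # newest black plane
         state[:, :, :, 9:16],                                         # shift white history by one
         (board == WHITE).reshape(1, size, size, 1),                   # newest white plane
         np.array(1 - state[:, :, :, -1]).reshape(1, size, size, 1)],  # flip the colour-to-play plane
        axis=3)

assert roll_state_sketch(np.zeros([1, 9, 9, 17]), [0] * 81).shape == (1, 9, 9, 17)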
@@ -75,7 +75,7 @@ class UCTNode(MCTSNode):
         start_time = time.time()
         self.mask = []
         for act in range(self.action_num - 1):
-            if not simulator.is_valid(self.state, act):
+            if not simulator.simulate_is_valid(self.state, act):
                 self.mask.append(act)
                 self.ucb[act] = -float("Inf")
             else:
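The loop above masks actions the simulator rejects; a self-contained sketch of that masking step with stand-in names (`mask_invalid_actions` is not a function in the repository):

import numpy as np

def mask_invalid_actions(simulator, state, action_num, ucb):
    mask = []
    for act in range(action_num - 1):                 # the last action (pass) is never masked
        if not simulator.simulate_is_valid(state, act):
            mask.append(act)
            ucb[act] = -float("Inf")                  # selection will never pick a masked action
    return mask

class _OnlyZeroIllegal:
    def simulate_is_valid(self, state, act):
        return act != 0                               # pretend only action 0 is illegal

print(mask_invalid_actions(_OnlyZeroIllegal(), None, 82, np.zeros(82)))  # [0]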