# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# $File: game.py
# $Date: Fri Dec 01 01:37:38 2017 +0800
# $Author: renyong15@mails.tsinghua.edu.cn
#
from __future__ import print_function
import copy
import sys
from collections import deque

import numpy as np
import tensorflow as tf

import go
import network_small
import strategy
import utils
from tianshou.core.mcts.mcts import MCTS


class Game:
    '''
    Load the real game and trained weights.
    TODO: maybe merge with the engine class in the future;
    it is currently left untouched for interacting with the Go UI.
    '''
def __init__(self, size=9, komi=6.5, checkpoint_path=None):
        self.size = size
        self.komi = komi
        self.board = [utils.EMPTY] * (self.size ** 2)
        self.history = []
        # keep the 8 most recent board states; the network input needs them
        self.latest_boards = deque(maxlen=8)
        for _ in range(8):
            self.latest_boards.append(self.board)
        self.executor = go.Go(game=self)
        self.simulator = strategy.GoEnv(game=self)
        self.net = network_small.Network()
        self.sess = self.net.forward(checkpoint_path)
        # the evaluator maps a (1, size, size, 17) state tensor to
        # (softmax move probabilities, value estimate)
        self.evaluator = lambda state: self.sess.run(
            [tf.nn.softmax(self.net.p), self.net.v],
            feed_dict={self.net.x: state, self.net.is_training: False})

    def _flatten(self, vertex):
        # map a 1-based (x, y) vertex to a 0-based board index
        x, y = vertex
        return (x - 1) * self.size + (y - 1)

    def _deflatten(self, idx):
        # inverse of _flatten: map a 0-based index back to a 1-based vertex
        x = idx // self.size + 1
        y = idx % self.size + 1
        return (x, y)
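    # Coordinate round-trip, a minimal sketch on a default 9x9 board:
    #   g._flatten((3, 2)) == 19  and  g._deflatten(19) == (3, 2)
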
def clear(self):
        self.board = [utils.EMPTY] * (self.size ** 2)
        self.history = []
        for _ in range(8):
            self.latest_boards.append(self.board)

    def set_size(self, n):
        self.size = n
        self.clear()

    def set_komi(self, k):
        self.komi = k

    def generate_nn_input(self, latest_boards, color):
        # build the (1, size, size, 17) network input: planes 0-7 mark the
        # stones stored as +1 over the 8 most recent boards, planes 8-15 the
        # stones stored as -1, and plane 16 encodes the colour to move
        state = np.zeros([1, self.size, self.size, 17])
        for i in range(8):
            state[0, :, :, i] = (np.array(latest_boards[i]) == 1).reshape(self.size, self.size)
            state[0, :, :, i + 8] = (np.array(latest_boards[i]) == -1).reshape(self.size, self.size)
        if color == utils.BLACK:
            state[0, :, :, 16] = np.ones([self.size, self.size])
        if color == utils.WHITE:
            state[0, :, :, 16] = np.zeros([self.size, self.size])
        return state
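    # A minimal shape check (illustrative; `g` is a default 9x9 Game):
    #   state = g.generate_nn_input(g.latest_boards, utils.BLACK)
    #   state.shape == (1, 9, 9, 17)
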
    def think(self, latest_boards, color):
        # TODO: is a shallow copy sufficient here, or do we need deepcopy?
        self.simulator.simulate_latest_boards = copy.copy(latest_boards)
        self.simulator.simulate_board = copy.copy(latest_boards[-1])
        nn_input = self.generate_nn_input(self.simulator.simulate_latest_boards, color)
        mcts = MCTS(self.simulator, self.evaluator, nn_input, self.size ** 2 + 1,
                    inverse=True, max_step=1)
        # sample a move from the visit counts, softened by the temperature
        temp = 1
        prob = mcts.root.N ** temp / np.sum(mcts.root.N ** temp)
        choice = np.random.choice(self.size ** 2 + 1, 1, p=prob).tolist()[0]
        if choice == self.size ** 2:
            move = utils.PASS
        else:
            move = self._deflatten(choice)
        return move, prob

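    # e.g. with visit counts N = [2, 6, 2] and temp = 1, the sampling
    # distribution above is [0.2, 0.6, 0.2] (a sketch; the real N has
    # size ** 2 + 1 entries, one per board point plus pass)
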
    def play_move(self, color, vertex):
        # this function can also be called directly to play the opponent's move
        if vertex == utils.PASS:
            return True
        res = self.executor.executor_do_move(color, vertex)
        return res

    def think_play_move(self, color):
        # self.prob is not returned, but it is stored because it is needed
        # for neural network training
        move, self.prob = self.think(self.latest_boards, color)
        # play the move immediately
        self.play_move(color, move)
        return move

    def status2symbol(self, s):
        pool = {utils.WHITE: 'O', utils.EMPTY: '.', utils.BLACK: 'X',
                utils.FILL: 'F', utils.UNKNOWN: '?'}
        return pool[s]

    def show_board(self):
        row = [i for i in range(1, self.size + 1)]
        col = ' abcdefghijklmnopqrstuvwxyz'
        print(' ', end='')
        for j in range(self.size + 1):
            print(col[j], end=' ')
        print('')
        for i in range(self.size):
            print(row[i], end=' ')
            if row[i] < 10:
                print(' ', end='')
            for j in range(self.size):
                print(self.status2symbol(self.board[self._flatten((j + 1, i + 1))]), end=' ')
            print('')
        sys.stdout.flush()


if __name__ == "__main__":
g = Game()
g.show_board()
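
    # A hedged end-to-end sketch (commented out: it needs a trained
    # checkpoint, and './checkpoints' below is a placeholder path):
    #   g = Game(checkpoint_path='./checkpoints')
    #   move = g.think_play_move(utils.BLACK)
    #   g.show_board()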