Tianshou/AlphaGo/game.py

# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# $File: game.py
# $Date: Fri Dec 01 01:37:38 2017 +0800
# $Author: renyong15 © <mails.tsinghua.edu.cn>
#
from __future__ import print_function
import utils
import copy
import tensorflow as tf
import numpy as np
import sys, os
import model
from collections import deque
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
from tianshou.core.mcts.mcts import MCTS

import go
import reversi
import time


class Game:
    '''
    Load the real game and trained weights.

    TODO: maybe merge with the engine class in the future;
    currently left untouched so it can keep interacting with the Go UI.
    '''
    def __init__(self, name=None, role=None, debug=False, checkpoint_path=None):
        self.name = name
        if role is None:
            raise ValueError("Need a role!")
        self.role = role
        self.debug = debug
        if self.name == "go":
            self.size = 9
            self.komi = 3.75
            self.history_length = 8
            self.history = []
            self.game_engine = go.Go(size=self.size, komi=self.komi, role=self.role)
            self.board = [utils.EMPTY] * (self.size ** 2)
        elif self.name == "reversi":
            self.size = 8
            self.history_length = 1
            self.history = []
            self.game_engine = reversi.Reversi(size=self.size)
            self.board = self.game_engine.get_board()
        else:
            raise ValueError(name + " is an unknown game...")
        self.evaluator = model.ResNet(self.size, self.size ** 2 + 1, history_length=self.history_length,
                                      checkpoint_path=checkpoint_path)
        self.latest_boards = deque(maxlen=self.history_length)
        for _ in range(self.history_length):
            self.latest_boards.append(self.board)
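        # Added note: the deque above keeps only the most recent
        # `history_length` board states, matching the history_length the
        # ResNet evaluator was built with, so they presumably form its stacked
        # input planes. Pre-filling with the initial board guarantees a full
        # stack even before `history_length` moves have been played.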

    def clear(self):
        if self.name == "go":
            self.board = [utils.EMPTY] * (self.size ** 2)
            self.history = []
        if self.name == "reversi":
            self.board = self.game_engine.get_board()
        for _ in range(self.history_length):
            self.latest_boards.append(self.board)

    def set_size(self, n):
        self.size = n
        self.clear()

    def set_komi(self, k):
        self.komi = k

    def think(self, latest_boards, color):
        mcts = MCTS(self.game_engine, self.evaluator, [latest_boards, color],
                    self.size ** 2 + 1, role=self.role, debug=self.debug, inverse=True)
        mcts.search(max_step=100)
        if self.debug:
            # dump the root statistics of the search for offline inspection
            with open("mcts_debug.log", 'ab') as log_file:
                np.savetxt(log_file, mcts.root.Q, header="\n" + self.role + " Q value : ", fmt='%.4f', newline=", ")
                np.savetxt(log_file, mcts.root.W, header="\n" + self.role + " W value : ", fmt='%.4f', newline=", ")
                np.savetxt(log_file, mcts.root.N, header="\n" + self.role + " N value : ", fmt="%d", newline=", ")
                np.savetxt(log_file, mcts.root.prior, header="\n" + self.role + " prior : ", fmt='%.4f', newline=", ")
        # turn the root visit counts into a move distribution and sample a move
        temp = 1
        prob = mcts.root.N ** temp / np.sum(mcts.root.N ** temp)
        choice = np.random.choice(self.size ** 2 + 1, 1, p=prob).tolist()[0]
        if choice == self.size ** 2:
            # the extra action (index size ** 2) encodes a pass
            move = utils.PASS
        else:
            move = self.game_engine._deflatten(choice)
        return move, prob
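    # Added worked example of the selection rule in think(): with visit counts
    # N = [10, 30, 60] and temp = 1, prob = N / sum(N) = [0.1, 0.3, 0.6], so
    # the most visited move is sampled 60% of the time. Note that the AlphaGo
    # Zero convention uses an exponent of 1 / temperature, which coincides
    # with the `** temp` above only at temperature 1.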

    def play_move(self, color, vertex):
        # this function can be called directly to play the opponent's move
        if vertex == utils.PASS:
            return True
        res = self.game_engine.executor_do_move(self.history, self.latest_boards, self.board, color, vertex)
        return res

    def think_play_move(self, color):
        # self.prob is not returned here, but it is kept because it is needed
        # for neural network training
        move, self.prob = self.think(self.latest_boards, color)
        # play the move immediately
        self.play_move(color, move)
        return move

    def status2symbol(self, s):
        pool = {utils.WHITE: 'O', utils.EMPTY: '.', utils.BLACK: 'X', utils.FILL: 'F', utils.UNKNOWN: '?'}
        return pool[s]

    def show_board(self):
        row = [i for i in range(1, 20)]
        col = ' abcdefghijklmnopqrstuvwxyz'
        print(' ', end='')
        for j in range(self.size + 1):
            print(col[j], end=' ')
        print('')
        for i in range(self.size):
            print(row[i], end=' ')
            if row[i] < 10:
                # keep single-digit row labels aligned with two-digit ones
                print(' ', end='')
            for j in range(self.size):
                print(self.status2symbol(self.board[self.game_engine._flatten((j + 1, i + 1))]), end=' ')
            print('')
        sys.stdout.flush()


if __name__ == "__main__":
    game = Game(name="reversi", role="black", checkpoint_path=None)
    game.debug = True
    game.think_play_move(utils.BLACK)
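    # A minimal self-play sketch (an illustrative addition, not part of the
    # original demo): keep alternating colors via think_play_move and print
    # the final board. The fixed six-move budget is an assumption, since this
    # file exposes no game-over check at this level.
    for color in [utils.WHITE, utils.BLACK] * 3:
        game.think_play_move(color)
    game.show_board()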