import tensorflow as tf
from cup import Cup
import numpy as np
from board import Board
import os
import time
import sys
import random
from eval import Eval

class Network:
    hidden_size = 40
    input_size = 196
    output_size = 1
    # TODO: can't remember the best learning rate; look this up.
    learning_rate = 0.1

    # TODO: actually compile TensorFlow properly instead of silencing the warning.
    #os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

    def custom_tanh(self, x, name=None):
        # Scaled tanh with range (-2, 2); see the activation notes in
        # eval_state for why that output range is desirable.
        return tf.scalar_mul(tf.constant(2.00), tf.tanh(x, name))
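
    # Note: custom_tanh appears unused in this file; the output layer in
    # __init__ uses a sigmoid. A hypothetical sketch of wiring it in as the
    # output activation, to get the (-2, 2) range discussed in eval_state:
    #   self.value = self.custom_tanh(tf.matmul(value_after_input, W_2) + b_2,
    #                                 name='output_layer')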

    def __init__(self, config, name):
        self.config = config
        self.session = tf.Session()
        self.checkpoint_path = config['model_path']
        self.name = name

        # Network input and the value of the next state (the TD target).
        self.x = tf.placeholder('float', [1, Network.input_size], name='x')
        self.value_next = tf.placeholder('float', [1, Network.output_size], name="value_next")

        xavier_init = tf.contrib.layers.xavier_initializer()

        W_1 = tf.get_variable("w_1", (Network.input_size, Network.hidden_size),
                              initializer=xavier_init)
        W_2 = tf.get_variable("w_2", (Network.hidden_size, Network.output_size),
                              initializer=xavier_init)

        b_1 = tf.get_variable("b_1", (Network.hidden_size,),
                              initializer=tf.zeros_initializer())
        b_2 = tf.get_variable("b_2", (Network.output_size,),
                              initializer=tf.zeros_initializer())

        value_after_input = tf.sigmoid(tf.matmul(self.x, W_1) + b_1, name='hidden_layer')
        self.value = tf.sigmoid(tf.matmul(value_after_input, W_2) + b_2, name='output_layer')

        # tf.reduce_sum sums its input, so this gives the difference between
        # the two values. With scalar outputs it is just V_{t+1} - V_t, but it
        # still works if the output ever becomes a vector.
        # TODO: Alexander thinks that self.value will be computed twice (instead of once)
        difference_in_values = tf.reduce_sum(tf.subtract(self.value_next, self.value, name='difference'))

        trainable_vars = tf.trainable_variables()
        gradients = tf.gradients(self.value, trainable_vars)

        apply_gradients = []

        with tf.variable_scope('apply_gradients'):
            for gradient, trainable_var in zip(gradients, trainable_vars):
                # Tesauro's update without the trace: Δw_t = α(V_{t+1} - V_t)∇_w V_t.
                # (A trace-based TD(λ) variant is sketched after __init__.)
                backprop_calc = Network.learning_rate * difference_in_values * gradient
                grad_apply = trainable_var.assign_add(backprop_calc)
                apply_gradients.append(grad_apply)

        self.training_op = tf.group(*apply_gradients, name='training_op')

        self.saver = tf.train.Saver(max_to_keep=1)
        self.session.run(tf.global_variables_initializer())

        self.restore_model()
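
    # The notes in eval_state describe Tesauro's full TD(λ) update with an
    # eligibility trace: e_t = λ·e_{t-1} + ∇_w V_t, then Δw = α(V_{t+1} - V_t)·e_t.
    # Below is a minimal sketch of how that could be built in this graph,
    # assuming one non-trainable trace variable per weight tensor. The method
    # name, its arguments and the λ default are illustrative assumptions;
    # nothing calls this yet.
    def build_trace_training_op(self, difference_in_values, lambda_=0.7):
        trainable_vars = tf.trainable_variables()
        gradients = tf.gradients(self.value, trainable_vars)
        updates = []
        with tf.variable_scope('apply_gradients_with_trace'):
            for gradient, trainable_var in zip(gradients, trainable_vars):
                # One persistent trace per weight tensor, starting at zero.
                trace = tf.Variable(tf.zeros(trainable_var.get_shape()),
                                    trainable=False)
                trace_update = trace.assign(lambda_ * trace + gradient)
                # Refresh the trace before the weights move, then apply
                # Δw = α(V_{t+1} - V_t)·e_t.
                with tf.control_dependencies([trace_update]):
                    updates.append(trainable_var.assign_add(
                        Network.learning_rate * difference_in_values * trace))
        return tf.group(*updates, name='training_op_with_trace')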

    def eval_state(self, state):
        # Run a state through the network and return its value estimate.

        # Design notes from when this was written:
        #
        # Remember to create placeholders for everything, because TensorFlow
        # builds the whole graph up front.
        #
        # Remember to create the dense layers.
        #
        # Figure out a way of giving a layer a custom activation function (we
        # want something which gives [-2, 2]). Naively tanh*2; however, I feel
        # this is wrong.
        #
        # tf.group groups a bunch of operations. So: calculate the gradients
        # for the different weights by using tf.trainable_variables() to find
        # all variables and tf.gradients(current_value, trainable_variables)
        # to find all the gradients. We can then loop through this and
        # calculate the trace for each gradient and variable pair (note: zip
        # can be used to combine the two lists found before), and then we can
        # calculate the overall change in weights, based on the formula listed
        # in Tesauro (learning_rate * difference_in_values * trace). This
        # calculation can be assigned to a tf variable, put in a list, and then
        # grouped into a single operation, essentially building our own
        # backprop function.
        #
        # Grouping them is done by
        # tf.group(*the_gradients_from_before_we_want_to_apply,
        #          name="training_op")
        #
        # If we remove the eligibility trace to begin with, we only have to
        # implement learning_rate * difference_in_values * gradients (the
        # aforementioned calculation); a trace-based variant is sketched
        # after __init__.

        val = self.session.run(self.value, feed_dict={self.x: state})
        #print("eval ({})".format(self.name), state, val, sep="\n")
        return val
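
    # A hypothetical usage sketch for eval_state: evaluate the opening position.
    #   state = np.array(Board.map_to_tesauro(Board.initial_state)).reshape(1, -1)
    #   value = network.eval_state(state)   # shape (1, 1); sigmoid output in (0, 1)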

    def save_model(self, episode_count):
        print("[NETWK] ({name}) Saving model to:".format(name = self.name),
              os.path.join(self.checkpoint_path, 'model.ckpt'))
        self.saver.save(self.session, os.path.join(self.checkpoint_path, 'model.ckpt'))
        with open(os.path.join(self.checkpoint_path, "episodes_trained"), 'w+') as f:
            f.write(str(episode_count) + "\n")

    def restore_model(self):
        if os.path.isfile(os.path.join(self.checkpoint_path, 'model.ckpt.index')):
            latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
            print("[NETWK] ({name}) Restoring model from:".format(name = self.name),
                  str(latest_checkpoint))
            self.saver.restore(self.session, latest_checkpoint)

            # Dump the restored variables for inspection.
            variables_names = [v.name for v in tf.trainable_variables()]
            values = self.session.run(variables_names)
            for k, v in zip(variables_names, values):
                print("Variable: ", k)
                print("Shape: ", v.shape)
                print(v)

        # Restore the trained episode count for the model
        episode_count_path = os.path.join(self.checkpoint_path, "episodes_trained")
        if os.path.isfile(episode_count_path):
            with open(episode_count_path, 'r') as f:
                self.config['start_episode'] = int(f.read())

    # TODO: there is a circular dependency here; something needs a rewrite.
    def adjust_weights(self, board, v_next):
        board = np.array(board).reshape((1, -1))
        self.session.run(self.training_op, feed_dict = { self.x: board,
                                                         self.value_next: v_next })
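
    # Hypothetical usage (mirrors what train_model does each turn): nudge the
    # weights toward the value of the state reached after this turn.
    #   network.adjust_weights(Board.map_to_tesauro(prev_board), cur_board_value)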

    # Sketch of the original training idea:
    #   while the game isn't done:
    #       x_next = g.next_move()
    #       value_next = network.eval_state(x_next)
    #       self.session.run(self.training_op,
    #                        feed_dict={self.x: x, self.value_next: value_next})
    #       x = x_next

    def make_move(self, board, roll):
        # Evaluate every legal state (always as player 1; callers flip the
        # board for player -1) and greedily pick the highest-valued one.
        legal_moves = Board.calculate_legal_states(board, 1, roll)
        moves_and_scores = [ (move, self.eval_state(np.array(Board.map_to_tesauro(move)).reshape(1, -1)))
                             for move in legal_moves ]
        scores = [ x[1] for x in moves_and_scores ]
        best_score_index = np.array(scores).argmax()
        best_move_pair = moves_and_scores[best_score_index]
        return best_move_pair
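
    # A hypothetical usage sketch: pick the move for a 3-4 roll from the
    # opening position. make_move always evaluates as player 1, so callers
    # (like train_model) flip the board first when moving for player -1.
    #   board, value = network.make_move(Board.initial_state, (3, 4))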

    def train_model(self, episodes=1000, save_step_size = 100, trained_eps = 0):
        start_time = time.time()

        def print_time_estimate(eps_completed):
            cur_time = time.time()
            time_diff = cur_time - start_time
            eps_per_sec = eps_completed / time_diff
            secs_per_ep = time_diff / eps_completed
            eps_remaining = (episodes - eps_completed)
            sys.stderr.write("[TRAIN] Averaging {per_sec} episodes per second\n".format(per_sec = round(eps_per_sec, 2)))
            sys.stderr.write("[TRAIN] {eps_remaining} episodes remaining; approx. {time_remaining} seconds remaining\n".format(eps_remaining = eps_remaining, time_remaining = int(eps_remaining * secs_per_ep)))

        sys.stderr.write("[TRAIN] Training {} episodes with save_step_size {}\n".format(episodes, save_step_size))
        outcomes = []

        def tesaurofi(board):
            return Board.map_to_tesauro(board)

        for episode in range(1, episodes + 1):
            sys.stderr.write("[TRAIN] Episode {}".format(episode + trained_eps))
            # TODO: decide which player should start here
            player = 1

            roll = (random.randrange(1,7), random.randrange(1,7))

            # Find the best move here and make it; then flip the player as the
            # first thing inside the while loop, so the next make_move call
            # yields V_{t+1}.
            prev_board, _ = self.make_move(Board.flip(Board.initial_state) if player == -1 else Board.initial_state, roll)

            if player == -1:
                prev_board = Board.flip(prev_board)

            while Board.outcome(prev_board) is None:
                player *= -1
                roll = (random.randrange(1,7), random.randrange(1,7))

                cur_board, cur_board_value = self.make_move(Board.flip(prev_board) if player == -1 else prev_board, roll)
                if player == -1:
                    cur_board = Board.flip(cur_board)

                self.adjust_weights(tesaurofi(prev_board), cur_board_value)

                prev_board = cur_board

            final_board = prev_board
            sys.stderr.write("\t outcome {}".format(Board.outcome(final_board)[1]))
            outcomes.append(Board.outcome(final_board)[1])
            # One last update, pulling the previous estimate toward the actual outcome.
            final_score = np.array([ Board.outcome(final_board)[1] ])
            self.adjust_weights(tesaurofi(final_board), final_score.reshape((1, 1)))

            sys.stderr.write("\n")

            if episode % min(save_step_size, episodes) == 0:
                sys.stderr.write("[TRAIN] Saving model...\n")
                self.save_model(episode + trained_eps)

            if episode % 50 == 0:
                print_time_estimate(episode)

        sys.stderr.write("[TRAIN] Saving model for final episode...\n")
        self.save_model(episode + trained_eps)

        return outcomes

    # Training idea, per turn:
    #  - take a turn, which finds the best state according to the current network and picks it
    #  - save the current state
    #  - run the training op (session.run(self.training_op, {x: x, value_next: value_next})),
    #    i.e. backprop based on the state reached after taking the turn and the
    #    state saved at the start of the turn
    #  - save the current state again, so we can keep running backprop against
    #    the "previous" turn

    # NOTE: We need a method to take a single turn, or at least to pick the
    # next best move, so we know how to evaluate according to TD-learning.
    # Right now, our game just continues in a while loop with nothing to stop it!

    def eval(self, trained_eps = 0):
        def do_eval(method, episodes = 1000, trained_eps = 0):
            start_time = time.time()

            def print_time_estimate(eps_completed):
                cur_time = time.time()
                time_diff = cur_time - start_time
                eps_per_sec = eps_completed / time_diff
                secs_per_ep = time_diff / eps_completed
                eps_remaining = (episodes - eps_completed)
                sys.stderr.write("[EVAL ] Averaging {per_sec} episodes per second\n".format(per_sec = round(eps_per_sec, 2)))
                sys.stderr.write("[EVAL ] {eps_remaining} episodes remaining; approx. {time_remaining} seconds remaining\n".format(eps_remaining = eps_remaining, time_remaining = int(eps_remaining * secs_per_ep)))

            sys.stderr.write("[EVAL ] Evaluating {eps} episode(s) with method '{method}'\n".format(eps=episodes, method=method))

            if method == 'random':
                outcomes = []
                for i in range(1, episodes + 1):
                    sys.stderr.write("[EVAL ] Episode {}".format(i))
                    board = Board.initial_state
                    while Board.outcome(board) is None:
                        roll = (random.randrange(1,7), random.randrange(1,7))
                        board = (self.make_move(board, roll))[0]
                        roll = (random.randrange(1,7), random.randrange(1,7))
                        board = Board.flip(Eval.make_random_move(Board.flip(board), 1, roll))
                    sys.stderr.write("\t outcome {}".format(Board.outcome(board)[1]))
                    outcomes.append(Board.outcome(board)[1])
                    sys.stderr.write("\n")

                    if i % 50 == 0:
                        print_time_estimate(i)
                return outcomes
            elif method == 'pubeval':
                outcomes = []
                # Evaluation against pubeval; Eval.make_pubeval_move(board, sym, roll)
                # returns the best move according to pubeval.
                for i in range(1, episodes + 1):
                    sys.stderr.write("[EVAL ] Episode {}".format(i))
                    board = Board.initial_state
                    while Board.outcome(board) is None:
                        roll = (random.randrange(1,7), random.randrange(1,7))
                        board = (self.make_move(board, roll))[0]

                        roll = (random.randrange(1,7), random.randrange(1,7))
                        board = Eval.make_pubeval_move(board, -1, roll)[0][0:26]
                    sys.stderr.write("\t outcome {}".format(Board.outcome(board)[1]))
                    outcomes.append(Board.outcome(board)[1])
                    sys.stderr.write("\n")

                    if i % 10 == 0:
                        print_time_estimate(i)

                return outcomes
            # elif method == 'dumbmodel':
            #     config_prime = self.config.copy()
            #     config_prime['model_path'] = os.path.join(config_prime['model_storage_path'], 'dumbmodel')
            #     eval_bot = Bot(1, config = config_prime, name = "dumbmodel")
            #     outcomes = []
            #     for i in range(1, episodes + 1):
            #         sys.stderr.write("[EVAL ] Episode {}".format(i))
            #         board = Board.initial_state
            #         while Board.outcome(board) is None:
            #             roll = (random.randrange(1,7), random.randrange(1,7))
            #             board = (self.make_move(board, self.p1.get_sym(), roll))[0]
            #             roll = (random.randrange(1,7), random.randrange(1,7))
            #             board = Board.flip(eval_bot.make_move(Board.flip(board), self.p1.get_sym(), roll)[0])
            #         sys.stderr.write("\t outcome {}".format(Board.outcome(board)[1]))
            #         outcomes.append(Board.outcome(board)[1])
            #         sys.stderr.write("\n")
            #         if i % 50 == 0:
            #             print_time_estimate(i)
            #     return outcomes
            else:
                sys.stderr.write("[EVAL ] Evaluation method '{}' is not defined\n".format(method))
                return [0]

        return [ (method, do_eval(method,
                                  self.config['episode_count'],
                                  trained_eps = trained_eps))
                 for method
                 in self.config['eval_methods'] ]
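
# A minimal usage sketch (the config values here are hypothetical; the real
# config is assembled elsewhere in the project):
#   config = {
#       'model_path': 'models/default',
#       'start_episode': 0,
#       'episode_count': 100,
#       'eval_methods': ['random', 'pubeval'],
#   }
#   network = Network(config, "default")
#   network.train_model(episodes=1000, save_step_size=100)
#   print(network.eval(trained_eps=1000))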