# backgammon/network.py

import os
import sys
import time
import random
import glob
from operator import itemgetter

import tensorflow as tf
import tensorflow.contrib.eager as tfe
import numpy as np

from board import Board
from eval import Eval
from player import Player


class Network:
    # board_features_quack has size 28
    # board_features_quack_fat has size 30
    # board_features_tesauro has size 198
    board_reps = {
        'quack-fat'   : (30,  Board.board_features_quack_fat),
        'quack'       : (28,  Board.board_features_quack),
        'tesauro'     : (198, Board.board_features_tesauro),
        'quack-norm'  : (30,  Board.board_features_quack_norm),
        'tesauro-fat' : (726, Board.board_features_tesauro_fat),
        'tesauro-poop': (198, Board.board_features_tesauro_wrong)
    }

    def custom_tanh(self, x, name=None):
        return tf.scalar_mul(tf.constant(2.00), tf.tanh(x, name))

    def __init__(self, config, name):
        """
        :param config: The configuration dict for the network
        :param name: The name of the network instance
        """
        move_options = {
            '1': self.make_move_1_ply,
            '0': self.make_move_0_ply
        }

        self.max_or_min = {
            1: np.argmax,
            -1: np.argmin
        }
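        # Player 1 maximises the predicted value; player -1 minimises it.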

        tf.enable_eager_execution()
        xavier_init = tf.contrib.layers.xavier_initializer()

        self.config = config
        self.checkpoint_path = os.path.join(config['model_storage_path'], config['model'])

        self.name = name

        self.make_move = move_options[
            self.config['ply']
        ]

        # Set board representation from config
        self.input_size, self.board_trans_func = Network.board_reps[
            self.config['board_representation']
        ]
        self.output_size = 1
        self.hidden_size = 40
        self.max_learning_rate = 0.1
        self.min_learning_rate = 0.001

        # Restore trained episode count for model
        episode_count_path = os.path.join(self.checkpoint_path, "episodes_trained")
        if os.path.isfile(episode_count_path):
            with open(episode_count_path, 'r') as f:
                self.episodes_trained = int(f.read())
        else:
            self.episodes_trained = 0

        global_step_path = os.path.join(self.checkpoint_path, "global_step")
        if os.path.isfile(global_step_path):
            with open(global_step_path, 'r') as f:
                self.global_step = int(f.read())
        else:
            self.global_step = 0

        self.model = tf.keras.Sequential([
            tf.keras.layers.Dense(40, activation="sigmoid", kernel_initializer=xavier_init,
                                  input_shape=(1, self.input_size)),
            tf.keras.layers.Dense(1, activation="sigmoid", kernel_initializer=xavier_init)
        ])

    def exp_decay(self, max_lr, global_step, decay_rate, decay_steps):
        """
        Calculates the exponential decay on a learning rate
        :param max_lr: The learning rate that the network starts at
        :param global_step: The global step
        :param decay_rate: The rate at which the learning rate should decay
        :param decay_steps: The amount of steps between each decay
        :return: The result of the exponential decay performed on the learning rate
        """
        res = max_lr * decay_rate ** (global_step // decay_steps)
        return res
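    # Worked example (hypothetical numbers): with max_lr=0.1, decay_rate=0.96 and
    # decay_steps=50000, a global_step of 100000 gives 0.1 * 0.96 ** 2 ≈ 0.0922.
    # The integer division makes this a staircase decay: the rate only drops once
    # per decay_steps steps.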
2018-03-04 16:35:36 +00:00
def do_backprop(self, prev_state, value_next):
2018-05-10 13:28:33 +00:00
"""
Performs the Temporal-difference backpropagation step on the model
:param prev_state: The previous state of the game, this has its value recalculated
:param value_next: The value of the current move
:return: Nothing, the calculation is performed on the model of the network
"""
self.learning_rate = tf.maximum(self.min_learning_rate,
2018-05-18 12:55:10 +00:00
self.exp_decay(self.max_learning_rate, self.global_step, 0.96, 50000),
name="learning_rate")
2018-05-22 13:10:41 +00:00
with tf.GradientTape() as tape:
value = self.model(prev_state.reshape(1,-1))
grads = tape.gradient(value, self.model.variables)
difference_in_values = tf.reshape(tf.subtract(value_next, value, name='difference_in_values'), [])
tf.summary.scalar("difference_in_values", tf.abs(difference_in_values))
2018-03-04 16:35:36 +00:00
with tf.variable_scope('apply_gradients'):
for grad, train_var in zip(grads, self.model.variables):
backprop_calc = self.learning_rate * difference_in_values * grad
train_var.assign_add(backprop_calc)
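
        # The loop above is the TD(0) update  w <- w + lr * (V(s') - V(s)) * grad_w V(s),
        # with value_next in the role of V(s') and value in the role of V(s).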

    def print_variables(self):
        """
        Prints all the variables of the model
        """
        variables = self.model.variables
        for k in variables:
            print(k)

    def eval_state(self, state):
        """
        Evaluates a single state
        :param state: The (already transformed) state to evaluate
        :return: The model's predicted value of the state
        """
        return self.model(state.reshape(1, -1))

    def save_model(self, episode_count):
        """
        Saves the model of the network together with the episode and global step counts
        :param episode_count: The amount of episodes the model has trained, written alongside the checkpoint
        :return: Nothing
        """
        tfe.Saver(self.model.variables).save(os.path.join(self.checkpoint_path, 'model.ckpt'))

        with open(os.path.join(self.checkpoint_path, "episodes_trained"), 'w+') as f:
            print("[NETWK] ({name}) Saving model to:".format(name=self.name),
                  os.path.join(self.checkpoint_path, 'model.ckpt'))
            f.write(str(episode_count) + "\n")

        with open(os.path.join(self.checkpoint_path, "global_step"), 'w+') as f:
            print("[NETWK] ({name}) Saving global step to:".format(name=self.name),
                  os.path.join(self.checkpoint_path, 'global_step'))
            f.write(str(self.global_step) + "\n")

        if self.config['verbose']:
            self.print_variables()

    def calc_vals(self, states):
        """
        Calculate a score for each state in states
        :param states: A number of states. The states have to be transformed before being given to this function.
        :return: The model's scores for the states, one per state
        """
        return self.model.predict_on_batch(states)

    def restore_model(self):
        """
        Restore a model for a session, such that a trained model can either be further trained or
        used for evaluation
        :return: Nothing. It's a side-effect that a model gets restored for the network.
        """
        if glob.glob(os.path.join(self.checkpoint_path, 'model.ckpt*.index')):
            latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
            print("[NETWK] ({name}) Restoring model from:".format(name=self.name),
                  str(latest_checkpoint))
            tfe.Saver(self.model.variables).restore(latest_checkpoint)

            # Restore trained episode count for model
            episode_count_path = os.path.join(self.checkpoint_path, "episodes_trained")
            if os.path.isfile(episode_count_path):
                with open(episode_count_path, 'r') as f:
                    self.config['start_episode'] = int(f.read())

            global_step_path = os.path.join(self.checkpoint_path, "global_step")
            if os.path.isfile(global_step_path):
                with open(global_step_path, 'r') as f:
                    self.config['global_step'] = int(f.read())

            if self.config['verbose']:
                self.print_variables()

    def make_move_0_ply(self, board, roll, player):
        """
        Find the best move given a board, a roll and a player, by finding all possible states one can go to
        and then picking the best by using the network to evaluate each state. This is 0-ply, i.e. no look-ahead.
        The state with the highest score is picked for the 1-player and the one with the lowest score for the
        -1-player.
        :param board: Current board
        :param roll: Current roll
        :param player: Current player
        :return: A pair of the best state to go to, together with the score of that state
        """
        legal_moves = list(Board.calculate_legal_states(board, player, roll))
        legal_states = np.array([self.board_trans_func(move, player)[0] for move in legal_moves])

        scores = self.model.predict_on_batch(legal_states)

        best_score_idx = self.max_or_min[player](scores)
        best_move, best_score = legal_moves[best_score_idx], scores[best_score_idx]

        return (best_move, best_score)
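    # Example usage (hypothetical values, `network` being an instance of this class):
    #   move, score = network.make_move_0_ply(Board.initial_state, (3, 1), 1)
    # returns the successor board whose predicted value is highest for player 1.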

    def make_move_1_ply(self, board, roll, player):
        """
        Return the best board and best score based on a 1-ply look-ahead.
        :param board: Current board
        :param roll: Current roll
        :param player: Current player
        :return: A pair of the best state to go to, together with the score of that state
        """
        start = time.time()
        best_pair = self.calculate_1_ply(board, roll, player)
        # print(time.time() - start)
        return best_pair

    def calculate_1_ply(self, board, roll, player):
        """
        Find the best move based on a 1-ply look-ahead. First the 10 best moves are picked by a 0-ply
        evaluation, then all rolls and successor states are generated for each of them, and the expected
        score of each of those boards is calculated from its successors.
        :param board: Current board
        :param roll: The original roll
        :param player: The current player
        :return: Best possible move based on 1-ply look-ahead
        """
        # find all legal states from the given board and the given roll
        init_legal_states = Board.calculate_legal_states(board, player, roll)

        legal_states = np.array([self.board_trans_func(state, player)[0] for state in init_legal_states])

        scores = [ score.numpy()
                   for score
                   in self.calc_vals(legal_states) ]

        moves_and_scores = list(zip(init_legal_states, scores))
        sorted_moves_and_scores = sorted(moves_and_scores, key=itemgetter(1), reverse=(player == 1))
        best_boards = [ x[0] for x in sorted_moves_and_scores[:10] ]

        scores = self.do_ply(best_boards, player)

        best_score_idx = self.max_or_min[player](scores)

        return (best_boards[best_score_idx], scores[best_score_idx])

    def do_ply(self, boards, player):
        """
        Calculates a single extra ply, resulting in a larger search space for our best move.
        This is somewhat hardcoded to a single ply: it reduces all scores to a mean per board rather than
        letting the function recurse deeper. If we wish to search more than 2-ply, this method should be
        extended to allow for that.
        :param boards: The boards to try all rolls on
        :param player: The player of the previous ply
        :return: An array of scores where each index describes one of the boards which was given as param
        to this function.
        """
        all_rolls = [ (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
                      (1, 6), (2, 2), (2, 3), (2, 4), (2, 5),
                      (2, 6), (3, 3), (3, 4), (3, 5), (3, 6),
                      (4, 4), (4, 5), (4, 6), (5, 5), (5, 6),
                      (6, 6) ]
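        # These are the 21 distinct rolls of two dice. Note that the mean taken below
        # averages over every successor state of every roll, so rolls with many legal
        # moves weigh more, and the dice probabilities (non-doubles being twice as
        # likely as doubles) are not applied.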

        length_list = []
        test_list = []

        # Prepping of data
        for board in boards:
            length = 0
            for roll in all_rolls:
                all_states = Board.calculate_legal_states(board, player*-1, roll)
                for state in all_states:
                    state = np.array(self.board_trans_func(state, player*-1)[0])
                    test_list.append(state)
                    length += 1
            length_list.append(length)

        all_scores = self.model.predict_on_batch(np.array(test_list))

        split_scores = []
        from_idx = 0
        for length in length_list:
            split_scores.append(all_scores[from_idx:from_idx+length])
            from_idx += length

        means_splits = [tf.reduce_mean(scores) for scores in split_scores]

        return means_splits

    def eval(self, episode_count, trained_eps=0):
        """
        Used to evaluate a model against either pubeval, a benchmark that plays at an intermediate level, or
        dumbeval, a benchmark that has been given random weights and therefore plays deterministically random.
        :param episode_count: The amount of episodes to run
        :param trained_eps: The amount of episodes the model we want to evaluate has trained
        :return: outcomes: The outcomes of the evaluation session
        """

        def do_eval(method, episodes=1000, trained_eps=0):
            """
            Do the actual evaluation
            :param method: Either pubeval or dumbeval
            :param episodes: Amount of episodes to use in the evaluation
            :param trained_eps: The amount of episodes the model has trained
            :return: outcomes: Described above
            """
            start_time = time.time()

            def print_time_estimate(eps_completed):
                cur_time = time.time()
                time_diff = cur_time - start_time
                eps_per_sec = eps_completed / time_diff
                secs_per_ep = time_diff / eps_completed
                eps_remaining = (episodes - eps_completed)
                sys.stderr.write(
                    "[EVAL ] Averaging {per_sec} episodes per second\n".format(per_sec=round(eps_per_sec, 2)))
                sys.stderr.write(
                    "[EVAL ] {eps_remaining} episodes remaining; approx. {time_remaining} seconds remaining\n".format(
                        eps_remaining=eps_remaining, time_remaining=int(eps_remaining * secs_per_ep)))

            sys.stderr.write(
                "[EVAL ] Evaluating {eps} episode(s) with method '{method}'\n".format(eps=episodes, method=method))

            if method == 'pubeval':
                outcomes = []
                for i in range(1, episodes + 1):
                    sys.stderr.write("[EVAL ] Episode {}".format(i))
                    board = Board.initial_state
                    while Board.outcome(board) is None:
                        roll = (random.randrange(1, 7), random.randrange(1, 7))
                        board = (self.make_move(board, roll, 1))[0]

                        roll = (random.randrange(1, 7), random.randrange(1, 7))
                        board = Eval.make_pubeval_move(board, -1, roll)[0][0:26]

                    sys.stderr.write("\t outcome {}".format(Board.outcome(board)[1]))
                    outcomes.append(Board.outcome(board)[1])
                    sys.stderr.write("\n")

                    if i % 10 == 0:
                        print_time_estimate(i)
                return outcomes

            elif method == 'dumbeval':
                outcomes = []
                for i in range(1, episodes + 1):
                    sys.stderr.write("[EVAL ] Episode {}".format(i))
                    board = Board.initial_state
                    while Board.outcome(board) is None:
                        roll = (random.randrange(1, 7), random.randrange(1, 7))
                        board = (self.make_move(board, roll, 1))[0]

                        roll = (random.randrange(1, 7), random.randrange(1, 7))
                        board = Eval.make_dumbeval_move(board, -1, roll)[0][0:26]

                    sys.stderr.write("\t outcome {}".format(Board.outcome(board)[1]))
                    outcomes.append(Board.outcome(board)[1])
                    sys.stderr.write("\n")

                    if i % 10 == 0:
                        print_time_estimate(i)
                return outcomes

            else:
                sys.stderr.write("[EVAL ] Evaluation method '{}' is not defined\n".format(method))
                return [0]

        outcomes = [ (method, do_eval(method,
                                      episode_count,
                                      trained_eps=trained_eps))
                     for method
                     in self.config['eval_methods'] ]
        return outcomes

    def play_against_network(self):
        """
        Allows you to play against the supplied model.
        :return: Nothing
        """
        self.restore_model()
        human_player = Player(-1)
        player = 1
        board = Board.initial_state
        while Board.outcome(board) is None:
            print(Board.pretty(board))
            roll = (random.randrange(1, 7), random.randrange(1, 7))
            print("Bot rolled:", roll)

            board, _ = self.make_move(board, roll, player)
            print(Board.pretty(board))

            roll = (random.randrange(1, 7), random.randrange(1, 7))
            print("You rolled:", roll)
            board = human_player.make_human_move(board, roll)

        print("DONE " * 10)
        print(Board.pretty(board))

    def train_model(self, episodes=1000, save_step_size=100, trained_eps=0):
        """
        Train the model by self-play.
        :param episodes: The amount of episodes to train for
        :param save_step_size: How many episodes to train between each save of the model
        :param trained_eps: The amount of episodes the model has already trained
        :return: The outcomes of the training session and the average value-difference per episode
        """
        self.restore_model()
        average_diffs = 0
        start_time = time.time()

        def print_time_estimate(eps_completed):
            cur_time = time.time()
            time_diff = cur_time - start_time
            eps_per_sec = eps_completed / time_diff
            secs_per_ep = time_diff / eps_completed
            eps_remaining = (episodes - eps_completed)
            sys.stderr.write(
                "[TRAIN] Averaging {per_sec} episodes per second\n".format(per_sec=round(eps_per_sec, 2)))
            sys.stderr.write(
                "[TRAIN] {eps_remaining} episodes remaining; approx. {time_remaining} seconds remaining\n".format(
                    eps_remaining=eps_remaining, time_remaining=int(eps_remaining * secs_per_ep)))

        sys.stderr.write("[TRAIN] Training {} episodes and save_step_size {}\n".format(episodes, save_step_size))
        outcomes = []
        for episode in range(1, episodes + 1):
            sys.stderr.write("[TRAIN] Episode {}".format(episode + trained_eps))
            # TODO decide which player should be here
            # player = 1
            player = random.choice([-1, 1])
            prev_board = Board.initial_state
            i = 0
            difference_in_values = 0
            while Board.outcome(prev_board) is None:
                i += 1
                self.global_step += 1

                cur_board, cur_board_value = self.make_move(prev_board,
                                                            (random.randrange(1, 7), random.randrange(1, 7)),
                                                            player)

                difference_in_values += abs((cur_board_value - self.eval_state(self.board_trans_func(prev_board, player))))

                if self.config['verbose']:
                    print("Difference in values:", difference_in_values)
                    print("Current board value :", cur_board_value)
                    print("Current board is :\n", cur_board)

                # adjust weights
                if Board.outcome(cur_board) is None:
                    self.do_backprop(self.board_trans_func(prev_board, player), cur_board_value)

                player *= -1
                prev_board = cur_board

            final_board = prev_board
            sys.stderr.write("\t outcome {}\t turns {}".format(Board.outcome(final_board)[1], i))
            outcomes.append(Board.outcome(final_board)[1])
            final_score = np.array([Board.outcome(final_board)[1]])
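            # Map the outcome, which lies in [-2, 2], onto [0, 1] so it matches the
            # range of the sigmoid output layer.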
            scaled_final_score = ((final_score + 2) / 4)
            difference_in_values += abs(scaled_final_score - cur_board_value)
            average_diffs += (difference_in_values[0][0] / (i + 1))

            self.do_backprop(self.board_trans_func(prev_board, player), scaled_final_score.reshape(1, 1))

            sys.stderr.write("\n")

            if episode % min(save_step_size, episodes) == 0:
                sys.stderr.write("[TRAIN] Saving model...\n")
                self.save_model(episode + trained_eps)

            if episode % 50 == 0:
                print_time_estimate(episode)

        sys.stderr.write("[TRAIN] Saving model for final episode...\n")
        self.save_model(episode + trained_eps)

        return outcomes, average_diffs / len(outcomes)