diff --git a/main.py b/main.py
index 8d42e2b..bc8de09 100644
--- a/main.py
+++ b/main.py
@@ -102,29 +102,29 @@ if args.list_models:
         sys.stderr.write("  {name}: {eps_trained}\n".format(name = model[0], eps_trained = model[1]))
     exit()
 
-
-# Set up network
-from network import Network
-network = Network(config, config['model'])
-eps = config['start_episode']
-
-# Set up variables
-episode_count = config['episode_count']
+if __name__ == "__main__":
+    # Set up network
+    from network import Network
+    network = Network(config, config['model'])
+    start_episode = network.episodes_trained
+
+    # Set up variables
+    episode_count = config['episode_count']
 
-if args.train:
-    while True:
-        train_outcome = network.train_model(episodes = episode_count, trained_eps = eps)
-        eps += episode_count
-        log_train_outcome(train_outcome, trained_eps = eps)
-        if config['eval_after_train']:
-            eval_outcomes = network.eval(trained_eps = eps)
-            log_eval_outcomes(eval_outcomes, trained_eps = eps)
-        if not config['train_perpetually']:
-            break
-elif args.eval:
-    eps = config['start_episode']
-    outcomes = network.eval()
-    log_eval_outcomes(outcomes, trained_eps = eps)
-#elif args.play:
-#    g.play(episodes = episode_count)
+    if args.train:
+        while True:
+            train_outcome = network.train_model(episodes = episode_count, trained_eps = start_episode)
+            start_episode += episode_count
+            log_train_outcome(train_outcome, trained_eps = start_episode)
+            if config['eval_after_train']:
+                eval_outcomes = network.eval(trained_eps = start_episode)
+                log_eval_outcomes(eval_outcomes, trained_eps = start_episode)
+            if not config['train_perpetually']:
+                break
+    elif args.eval:
+        outcomes = network.eval()
+        log_eval_outcomes(outcomes, trained_eps = start_episode)
+    # elif args.play:
+    #     g.play(episodes = episode_count)
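[NOTE] main.py now resumes from the episode counter that Network restores from disk rather than from config['start_episode']. A minimal sketch of that round-trip, using the "episodes_trained" filename from the diff (the helper names here are illustrative, not part of the codebase):

    import os

    def read_episodes_trained(checkpoint_path):
        # Mirrors Network.__init__: a missing file means a fresh model.
        path = os.path.join(checkpoint_path, "episodes_trained")
        if os.path.isfile(path):
            with open(path, 'r') as f:
                return int(f.read())
        return 0

    def write_episodes_trained(checkpoint_path, episode_count):
        # Mirrors Network.save_model: overwrite with the new running total.
        with open(os.path.join(checkpoint_path, "episodes_trained"), 'w+') as f:
            f.write(str(episode_count) + "\n")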
diff --git a/network.py b/network.py
index 62b1d17..f058d48 100644
--- a/network.py
+++ b/network.py
@@ -13,7 +13,7 @@ class Network:
     input_size = 26
     output_size = 1
     # Can't remember the best learning_rate, look this up
-    learning_rate = 0.1
+    learning_rate = 0.05
 
     # TODO: Actually compile tensorflow properly
     #os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
@@ -23,12 +23,20 @@ class Network:
 
     def __init__(self, config, name):
         self.config = config
-        self.session = tf.Session()
         self.checkpoint_path = config['model_path']
+        self.name = name
+
+        # Restore trained episode count for model
+        episode_count_path = os.path.join(self.checkpoint_path, "episodes_trained")
+        if os.path.isfile(episode_count_path):
+            with open(episode_count_path, 'r') as f:
+                self.episodes_trained = int(f.read())
+        else:
+            self.episodes_trained = 0
 
         # input = x
-        self.x = tf.placeholder('float', [1, Network.input_size], name='x')
+        self.x = tf.placeholder('float', [1, Network.input_size], name='input')
         self.value_next = tf.placeholder('float', [1, Network.output_size], name="value_next")
 
         xavier_init = tf.contrib.layers.xavier_initializer()
@@ -43,20 +51,22 @@ class Network:
         b_2 = tf.get_variable("b_2", (Network.output_size,),
                               initializer=tf.zeros_initializer)
 
-        value_after_input = self.custom_tanh(tf.matmul(self.x, W_1) + b_1, name='hidden_layer')
+        normalized_input = tf.nn.l2_normalize(self.x)
+        value_after_input = tf.sigmoid(tf.matmul(normalized_input, W_1) + b_1, name='hidden_layer')
 
-        self.value = self.custom_tanh(tf.matmul(value_after_input, W_2) + b_2, name='output_layer')
+        self.value = tf.sigmoid(tf.matmul(value_after_input, W_2) + b_2, name='output_layer')
 
-        # tf.reduce_sum basically finds the sum of its input, so this gives the
-        # difference between the two values, in case they should be lists, which
-        # they might be if our input changes
+        # Difference between the next value and the current value, reshaped
+        # to a scalar so it can be logged as a summary.
         # TODO: Alexander thinks that self.value will be computed twice (instead of once)
-        difference_in_values = tf.reduce_sum(self.value_next - self.value, name='difference')
+        difference_in_values = tf.reshape(tf.subtract(self.value_next, self.value, name='difference_in_values'), [])
+        tf.summary.scalar("difference_in_values", tf.abs(difference_in_values))
 
         trainable_vars = tf.trainable_variables()
         gradients = tf.gradients(self.value, trainable_vars)
-
+
         apply_gradients = []
 
         with tf.variable_scope('apply_gradients'):
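[NOTE] The hunk below only shows the tail of the apply_gradients loop; the update rule itself lies outside the diff context. Assuming the TD(0)-style rule the surrounding code implies (each weight nudged along the value gradient, scaled by learning_rate and the value difference), the loop body would look roughly like this hypothetical reconstruction:

    # Hypothetical reconstruction -- the actual loop body is not shown in this diff.
    for var, grad in zip(trainable_vars, gradients):
        # w <- w + alpha * (V_next - V) * dV/dw
        backprop_calc = Network.learning_rate * difference_in_values * grad
        grad_apply = var.assign_add(backprop_calc)
        apply_gradients.append(grad_apply)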
@@ -67,13 +77,10 @@ class Network:
                 apply_gradients.append(grad_apply)
             self.training_op = tf.group(*apply_gradients, name='training_op')
 
-        self.saver = tf.train.Saver(max_to_keep=1)
-        self.session.run(tf.global_variables_initializer())
-        self.restore_model()
+        self.saver = tf.train.Saver(max_to_keep=1)
 
-    def eval_state(self, state):
+    def eval_state(self, sess, state):
         # Run state through a network
 
         # Remember to create placeholders for everything because wtf tensorflow
@@ -107,25 +114,25 @@ class Network:
 
         # print("Network is evaluating")
-        val = self.session.run(self.value, feed_dict={self.x: state})
         #print("eval ({})".format(self.name), state, val, sep="\n")
-        return val
+        return sess.run(self.value, feed_dict={self.x: state})
 
-    def save_model(self, episode_count):
-        self.saver.save(self.session, os.path.join(self.checkpoint_path, 'model.ckpt'))
+
+    def save_model(self, sess, episode_count):
+        self.saver.save(sess, os.path.join(self.checkpoint_path, 'model.ckpt'))
         with open(os.path.join(self.checkpoint_path, "episodes_trained"), 'w+') as f:
             print("[NETWK] ({name}) Saving model to:".format(name = self.name),
                   os.path.join(self.checkpoint_path, 'model.ckpt'))
             f.write(str(episode_count) + "\n")
 
-    def restore_model(self):
+    def restore_model(self, sess):
         if os.path.isfile(os.path.join(self.checkpoint_path, 'model.ckpt.index')):
             latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
             print("[NETWK] ({name}) Restoring model from:".format(name = self.name),
                   str(latest_checkpoint))
-            self.saver.restore(self.session, latest_checkpoint)
+            self.saver.restore(sess, latest_checkpoint)
             variables_names = [v.name for v in tf.trainable_variables()]
-            values = self.session.run(variables_names)
+            values = sess.run(variables_names)
             for k, v in zip(variables_names, values):
                 print("Variable: ", k)
                 print("Shape: ", v.shape)
@@ -137,26 +144,10 @@ class Network:
             with open(episode_count_path, 'r') as f:
                 self.config['start_episode'] = int(f.read())
 
-    # Have a circular dependency, #fuck, need to rewrite something
-    def adjust_weights(self, board, v_next):
-#        print("lol")
-        board = np.array(board).reshape((1,26))
-        self.session.run(self.training_op, feed_dict = { self.x: board,
-                                                         self.value_next: v_next })
-
-
-    # while game isn't done:
-        #x_next = g.next_move()
-        #value_next = network.eval_state(x_next)
-        #self.session.run(self.training_op, feed_dict={self.x: x, self.value_next: value_next})
-        #x = x_next
-
-
-
-    def make_move(self, board, roll):
+    def make_move(self, sess, board, roll):
         # print(Board.pretty(board))
         legal_moves = Board.calculate_legal_states(board, 1, roll)
-        moves_and_scores = [ (move, self.eval_state(np.array(move).reshape(1,26))) for move in legal_moves ]
+        moves_and_scores = [ (move, self.eval_state(sess, np.array(move).reshape(1,26))) for move in legal_moves ]
         scores = [ x[1] for x in moves_and_scores ]
         best_score_index = np.array(scores).argmax()
         best_move_pair = moves_and_scores[best_score_index]
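[NOTE] With tf.Session() gone from __init__, every public method now takes the session explicitly, so callers own its lifetime. A minimal usage sketch under that contract (config and board are assumed to be in scope):

    import numpy as np
    import tensorflow as tf
    from network import Network

    network = Network(config, config['model'])
    with tf.Session() as sess:
        # The caller initializes and restores before any evaluation.
        sess.run(tf.global_variables_initializer())
        network.restore_model(sess)
        value = network.eval_state(sess, np.array(board).reshape(1, 26))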
@@ -165,73 +156,101 @@ class Network:
 
     def train_model(self, episodes=1000, save_step_size = 100, trained_eps = 0):
-        start_time = time.time()
+        with tf.Session() as sess:
+            writer = tf.summary.FileWriter("/tmp/log/tf", sess.graph)
+
+            sess.run(tf.global_variables_initializer())
+            self.restore_model(sess)
+
+            variables_names = [v.name for v in tf.trainable_variables()]
+            values = sess.run(variables_names)
+            for k, v in zip(variables_names, values):
+                print("Variable: ", k)
+                print("Shape: ", v.shape)
+                print(v)
 
-        def print_time_estimate(eps_completed):
-            cur_time = time.time()
-            time_diff = cur_time - start_time
-            eps_per_sec = eps_completed / time_diff
-            secs_per_ep = time_diff / eps_completed
-            eps_remaining = (episodes - eps_completed)
-            sys.stderr.write("[TRAIN] Averaging {per_sec} episodes per second\n".format(per_sec = round(eps_per_sec, 2)))
-            sys.stderr.write("[TRAIN] {eps_remaining} episodes remaining; approx. {time_remaining} seconds remaining\n".format(eps_remaining = eps_remaining, time_remaining = int(eps_remaining * secs_per_ep)))
+            start_time = time.time()
+
+            def print_time_estimate(eps_completed):
+                cur_time = time.time()
+                time_diff = cur_time - start_time
+                eps_per_sec = eps_completed / time_diff
+                secs_per_ep = time_diff / eps_completed
+                eps_remaining = (episodes - eps_completed)
+                sys.stderr.write("[TRAIN] Averaging {per_sec} episodes per second\n".format(per_sec = round(eps_per_sec, 2)))
+                sys.stderr.write("[TRAIN] {eps_remaining} episodes remaining; approx. {time_remaining} seconds remaining\n".format(eps_remaining = eps_remaining, time_remaining = int(eps_remaining * secs_per_ep)))
 
-        sys.stderr.write("[TRAIN] Training {} episodes and save_step_size {}\n".format(episodes, save_step_size))
-        outcomes = []
-        for episode in range(1, episodes + 1):
-            sys.stderr.write("[TRAIN] Episode {}".format(episode + trained_eps))
-            # TODO decide which player should be here
-            player = 1
+            sys.stderr.write("[TRAIN] Training {} episodes and save_step_size {}\n".format(episodes, save_step_size))
+            outcomes = []
+            for episode in range(1, episodes + 1):
+                sys.stderr.write("[TRAIN] Episode {}".format(episode + trained_eps))
+                # TODO decide which player should be here
+                player = 1
 
-            roll = (random.randrange(1,7), random.randrange(1,7))
-            prev_board, _ = self.make_move(Board.flip(Board.initial_state) if player == -1 else Board.initial_state, roll)
-            if player == -1:
-                prev_board = Board.flip(prev_board)
-
-            # find the best move here, make this move, then change turn as the
-            # first thing inside of the while loop and then call
-            # best_move_and_score to get V_t+1
-
-            # i = 0
-            while Board.outcome(prev_board) is None:
-                # print("-"*30)
-                # print(i)
-                # print(roll)
-                # print(Board.pretty(prev_board))
-                # print("/"*30)
-                # i += 1
-
-                player *= -1
                 roll = (random.randrange(1,7), random.randrange(1,7))
-
-                cur_board, cur_board_value = self.make_move(Board.flip(prev_board) if player == -1 else prev_board, roll)
+                prev_board, _ = self.make_move(sess, Board.flip(Board.initial_state) if player == -1 else Board.initial_state, roll)
                 if player == -1:
-                    cur_board = Board.flip(cur_board)
-
-                self.adjust_weights(prev_board, cur_board_value)
-
-                prev_board = cur_board
-
-            final_board = prev_board
-            sys.stderr.write("\t outcome {}".format(Board.outcome(final_board)[1]))
-            outcomes.append(Board.outcome(final_board)[1])
-            final_score = np.array([ Board.outcome(final_board)[1] ])
-            self.adjust_weights(prev_board, final_score.reshape((1, 1)))
-
-            sys.stderr.write("\n")
+                    prev_board = Board.flip(prev_board)
 
-            if episode % min(save_step_size, episodes) == 0:
-                sys.stderr.write("[TRAIN] Saving model...\n")
-                self.save_model(episode+trained_eps)
+                # find the best move here, make this move, then change turn as the
+                # first thing inside of the while loop and then call
+                # best_move_and_score to get V_t+1
 
-            if episode % 50 == 0:
-                print_time_estimate(episode)
+                # i = 0
+                while Board.outcome(prev_board) is None:
+                    # print("-"*30)
+                    # print(i)
+                    # print(roll)
+                    # print(Board.pretty(prev_board))
+                    # print("/"*30)
+                    # i += 1
+
+                    player *= -1
+                    roll = (random.randrange(1,7), random.randrange(1,7))
 
-        sys.stderr.write("[TRAIN] Saving model for final episode...\n")
-        self.save_model(episode+trained_eps)
+                    cur_board, cur_board_value = self.make_move(sess, Board.flip(prev_board) if player == -1 else prev_board, roll)
+                    if player == -1:
+                        cur_board = Board.flip(cur_board)
+
+                    # print("cur_board_value:", cur_board_value)
+
+                    # adjust weights
+                    sess.run(self.training_op,
+                             feed_dict = { self.x: np.array(prev_board).reshape((1,26)),
+                                           self.value_next: cur_board_value })
+
+                    prev_board = cur_board
+
+                final_board = prev_board
+                sys.stderr.write("\t outcome {}".format(Board.outcome(final_board)[1]))
+                outcomes.append(Board.outcome(final_board)[1])
+                final_score = np.array([ Board.outcome(final_board)[1] ])
+                scaled_final_score = ((final_score + 2) / 4)
+
+                # print("scaled_final_score", scaled_final_score)
+
+                with tf.name_scope("final"):
+                    merged = tf.summary.merge_all()
+                    summary, _ = sess.run([merged, self.training_op],
+                                          feed_dict = { self.x: np.array(prev_board).reshape((1,26)),
+                                                        self.value_next: scaled_final_score.reshape((1, 1)) })
+                    writer.add_summary(summary, episode + trained_eps)
+
+                sys.stderr.write("\n")
+
+                if episode % min(save_step_size, episodes) == 0:
+                    sys.stderr.write("[TRAIN] Saving model...\n")
+                    self.save_model(sess, episode+trained_eps)
+
+                if episode % 50 == 0:
+                    print_time_estimate(episode)
+
+            sys.stderr.write("[TRAIN] Saving model for final episode...\n")
+            self.save_model(sess, episode+trained_eps)
+
+            writer.close()
 
-        return outcomes
+            return outcomes
 
 
     # take turn, which finds the best state and picks it, based on the current network
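[NOTE] The final TD target is rescaled because the output layer moved from tanh to sigmoid. Assuming outcomes are the usual single/gammon scores in [-2, 2], which the (final_score + 2) / 4 line implies, the affine map lands them in the sigmoid's [0, 1] range:

    # (outcome + 2) / 4 maps [-2, 2] onto [0, 1]
    for outcome in (-2, -1, 1, 2):
        print(outcome, (outcome + 2) / 4)   # -> 0.0, 0.25, 0.75, 1.0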
@@ -244,7 +263,7 @@ class Network:
 
     def eval(self, trained_eps = 0):
-        def do_eval(method, episodes = 1000, trained_eps = 0):
+        def do_eval(sess, method, episodes = 1000, trained_eps = 0):
             start_time = time.time()
 
             def print_time_estimate(eps_completed):
@@ -265,7 +284,7 @@ class Network:
                     board = Board.initial_state
                     while Board.outcome(board) is None:
                         roll = (random.randrange(1,7), random.randrange(1,7))
-                        board = (self.p1.make_move(board, self.p1.get_sym(), roll))[0]
+                        board = (self.p1.make_move(sess, board, self.p1.get_sym(), roll))[0]
                         roll = (random.randrange(1,7), random.randrange(1,7))
                         board = Board.flip(Eval.make_random_move(Board.flip(board), 1, roll))
                     sys.stderr.write("\t outcome {}".format(Board.outcome(board)[1]))
@@ -288,7 +307,7 @@ class Network:
                         #print(roll)
 
                         prev_board = tuple(board)
-                        board = (self.make_move(board, roll))[0]
+                        board = (self.make_move(sess, board, roll))[0]
 
                         #print("post p1:", board, sep="\n")
 
                         #print("."*30)
@@ -336,9 +355,14 @@ class Network:
             else:
                 sys.stderr.write("[EVAL ] Evaluation method '{}' is not defined\n".format(method))
                 return [0]
 
-        return [ (method, do_eval(method,
-                                  self.config['episode_count'],
-                                  trained_eps = trained_eps))
-                 for method
-                 in self.config['eval_methods'] ]
+
+        with tf.Session() as session:
+            session.run(tf.global_variables_initializer())
+            self.restore_model(session)
+            outcomes = [ (method, do_eval(session,
+                                          method,
+                                          self.config['episode_count'],
+                                          trained_eps = trained_eps))
+                         for method
+                         in self.config['eval_methods'] ]
+            return outcomes
diff --git a/plot.py b/plot.py
index 8261cde..5a94f51 100644
--- a/plot.py
+++ b/plot.py
@@ -44,7 +44,7 @@ if __name__ == '__main__':
         plt.show()
 
     while True:
-        df = dataframes('default')['eval']
+        df = dataframes('a')['eval']
         print(df)
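[NOTE] train_model now writes the difference_in_values scalar summary to /tmp/log/tf via tf.summary.FileWriter, so the per-episode value error can be monitored during training with `tensorboard --logdir /tmp/log/tf`.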