diff --git a/network.py b/network.py
index 657b924..86e0fe2 100644
--- a/network.py
+++ b/network.py
@@ -323,42 +323,9 @@ class Network:
 
         scores_means = [tf.reduce_mean(score) for score in all_scores]
         transformed_means = [tf.reduce_mean(score) for score in transformed_scores]
-
-        return ([scores_means, transformed_means])
-
-        print(time.time() - start)
-        # count = 0
-        # # loop over boards
-        # for a_board in boards:
-        #     a_board_scores = []
-        #
-        #     # loop over all rolls, for each board
-        #     for roll in all_rolls:
-        #
-        #         # find all states we can get to, given the board and roll and the opposite player
-        #         all_rolls_boards = Board.calculate_legal_states(a_board, player*-1, roll)
-        #         count += len(all_rolls_boards)
-        #         # find scores for each board found above
-        #         spec_roll_scores = [self.eval_state(sess, self.board_trans_func(new_board, player*-1))
-        #                             for new_board in all_rolls_boards]
-        #
-        #         # if the original player is the -1 player, then we need to find (1-value)
-        #         spec_roll_scores = [x if player == 1 else (1-x) for x in spec_roll_scores]
-        #
-        #         # find the best score
-        #         best_score = max(spec_roll_scores)
-        #
-        #         # append the best score to a_board_scores, where we keep track of the best score for each board
-        #         a_board_scores.append(best_score)
-        #
-        #     # save the expected average of board scores
-        #     all_rolls_scores.append(sum(a_board_scores)/len(a_board_scores))
-        #
-        # # return all the average scores
-        # print(count)
-        # return all_rolls_scores
+        return ([scores_means, transformed_means])
 
     def calc_n_ply(self, n_init, sess, board, player, roll):