Merge branch 'master' into 'experimentation'

# Conflicts:
#   main.py
Christoffer Müller Madsen 2018-05-22 13:11:43 +00:00
commit 1fd6c35baa

main.py

@@ -2,6 +2,7 @@ import argparse
import sys
import os
import time
import subprocess
# Parse command line arguments
parser = argparse.ArgumentParser(description="Backgammon games")
@@ -79,16 +80,18 @@ if not os.path.isdir(log_path):
# Define helper functions
def log_train_outcome(outcome, diff_in_values, trained_eps = 0, log_path = os.path.join(model_path(), 'logs', "train.log")):
commit = subprocess.run(['git', 'describe', '--first-parent', '--always'], stdout=subprocess.PIPE).stdout.decode('utf-8').rstrip()
format_vars = { 'trained_eps': trained_eps,
'count': len(outcome),
'sum': sum(outcome),
'mean': sum(outcome) / len(outcome),
'time': int(time.time()),
'average_diff_in_vals': diff_in_values,
'commit': commit
}
with open(log_path, 'a+') as f:
f.write("{time};{trained_eps};{count};{sum};{mean};{average_diff_in_vals}".format(**format_vars) + "\n")
f.write("{time};{trained_eps};{count};{sum};{mean};{average_diff_in_vals};{commit}".format(**format_vars) + "\n")
def log_eval_outcomes(outcomes, trained_eps = 0, log_path = os.path.join(model_path(), 'logs', "eval.log")):
@@ -99,9 +102,12 @@ def log_eval_outcomes(outcomes, trained_eps = 0, log_path = os.path.join(model_p
:param log_path:
:return:
"""
commit = subprocess.run(['git', 'describe', '--first-parent', '--always'], stdout=subprocess.PIPE).stdout.decode('utf-8').rstrip()
for outcome in outcomes:
scores = outcome[1]
format_vars = { 'trained_eps': trained_eps,
format_vars = { 'commit': commit,
'trained_eps': trained_eps,
'method': outcome[0],
'count': len(scores),
'sum': sum(scores),
@@ -109,9 +115,10 @@ def log_eval_outcomes(outcomes, trained_eps = 0, log_path = os.path.join(model_p
'time': int(time.time())
}
with open(log_path, 'a+') as f:
f.write("{time};{method};{trained_eps};{count};{sum};{mean}".format(**format_vars) + "\n")
f.write("{time};{method};{trained_eps};{count};{sum};{mean};{commit}".format(**format_vars) + "\n")
def log_bench_eval_outcomes(outcomes, log_path, index, time, trained_eps = 0):
commit = subprocess.run(['git', 'describe', '--first-parent', '--always'], stdout=subprocess.PIPE).stdout.decode('utf-8').rstrip()
for outcome in outcomes:
scores = outcome[1]
format_vars = { 'trained_eps': trained_eps,
@@ -121,9 +128,10 @@ def log_bench_eval_outcomes(outcomes, log_path, index, time, trained_eps = 0):
'mean': sum(scores) / len(scores),
'time': time,
'index': index,
'commit': commit
}
with open(log_path, 'a+') as f:
f.write("{method};{count};{index};{time};{sum};{mean}".format(**format_vars) + "\n")
f.write("{method};{count};{index};{time};{sum};{mean};{commit}".format(**format_vars) + "\n")
def find_board_rep():
checkpoint_path = os.path.join(config['model_storage_path'], config['model'])
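As a rough sketch of what this merge brings in (not part of the commit itself): each logging helper now shells out to git describe --first-parent --always, which falls back to the abbreviated commit hash when no tag is reachable, and appends that identifier as the last semicolon-separated field of every log record. The outcome values and diff value below are made-up placeholders, only to illustrate the resulting train.log line layout.

import subprocess
import time

# Resolve the current commit; --always guarantees output (abbreviated hash)
# even when no tag is reachable from HEAD.
commit = subprocess.run(
    ['git', 'describe', '--first-parent', '--always'],
    stdout=subprocess.PIPE
).stdout.decode('utf-8').rstrip()

# Hypothetical outcome data, purely for illustration.
outcome = [1, -1, 2, 1]
format_vars = {
    'trained_eps': 0,
    'count': len(outcome),
    'sum': sum(outcome),
    'mean': sum(outcome) / len(outcome),
    'time': int(time.time()),
    'average_diff_in_vals': 0.0,
    'commit': commit,
}

# One semicolon-separated record per call, commit hash appended last.
print("{time};{trained_eps};{count};{sum};{mean};{average_diff_in_vals};{commit}".format(**format_vars))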