'''
Utilities for logging experiment run stats, such as accuracy
and loss over time for different runs. Runtime arguments are stored
in the log.
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import datetime
import logging
import time
from collections import OrderedDict


class ModelTrainerLog():
    def __init__(self, expname, runtime_args):
        now = datetime.datetime.fromtimestamp(time.time())
        self.experiment_id = now.strftime('%Y%m%d_%H%M%S')
        self.filename = "%s_%s.log" % (expname, self.experiment_id)
        self.logstr("# %s" % str(runtime_args))
        self.headers = None
        self.start_time = time.time()
        self.last_time = self.start_time
        self.last_input_count = 0

    def logstr(self, message):
        # Append one line to the experiment log file and echo it to the logger.
        with open(self.filename, "a") as f:
            f.write(message + "\n")
        logging.getLogger("experiment_logger").info(message)

    def log(self, input_count, batch_count, additional_values):
        logdict = OrderedDict()
        delta_t = time.time() - self.last_time
        delta_count = input_count - self.last_input_count
        self.last_time = time.time()
        self.last_input_count = input_count

        logdict['time'] = time.time() - self.start_time
        logdict['input_counter'] = input_count
        logdict['batch_count'] = batch_count
        if delta_t > 0:
            logdict['inputs_per_sec'] = delta_count / delta_t
        else:
            logdict['inputs_per_sec'] = 0.0

        # Additional values are appended as extra columns in sorted key order.
        for k in sorted(additional_values.keys()):
            logdict[k] = additional_values[k]

        # Write the headers if they are not written yet
        if self.headers is None:
            self.headers = list(logdict.keys())
            self.logstr(",".join(self.headers))
        self.logstr(",".join([str(v) for v in logdict.values()]))