Remove too many redundant tensorboard scalars

The redundant scalars clutter the UI and waste disk space and cycles.
Giovanni Campagna 2019-03-01 11:40:52 -08:00
parent ca91b6e24f
commit a0ff18f1fe
1 changed file with 2 additions and 9 deletions
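
For context, the diff below consolidates the TensorBoard tag layout so each value is logged once, under what appears to be a single {metric}/{task}/{split} naming scheme, instead of several permutations of the same components. The following is a minimal sketch of that convention, assuming torch.utils.tensorboard (the project may have used tensorboardX at the time; both expose the same add_scalar(tag, value, step) call) and hypothetical task names, metric names, and values:

    from torch.utils.tensorboard import SummaryWriter

    # Placeholder values standing in for the training loop's variables.
    writer = SummaryWriter(log_dir='runs/example')
    task = val_task = 'squad'
    iteration = 100
    local_loss, val_loss = 1.23, 1.45
    metric_dict = {'em': 61.2, 'nf1': 74.9}
    deca_score = 136.1

    # One tag per value: <metric>/<task>/<split>.
    writer.add_scalar(f'loss/{task}/train', local_loss, iteration)
    writer.add_scalar(f'loss/{val_task}/val', val_loss, iteration)
    for metric_key, metric_value in metric_dict.items():
        writer.add_scalar(f'{metric_key}/{val_task}/val', metric_value, iteration)
    writer.add_scalar('deca/val', deca_score, iteration)
    writer.close()

TensorBoard groups scalars by the first tag component, so this layout yields one chart section per metric, with one curve per task and split, rather than the same curve duplicated across several sections.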

@@ -191,7 +191,7 @@ def train(args, model, opt, train_iters, train_iterations, field, rank=0, world_
val_loss, metric_dict = validate(val_task, val_iter, model, logger, field, world_size, rank, iteration, num_print=args.num_print, args=args)
if val_loss is not None:
log_entry = f'{args.timestamp}:{elapsed_time(logger)}:iteration_{iteration}:{round_progress}train_{task}:{task_progress}val_{val_task}:val_loss{val_loss.item():.4f}:'
writer.add_scalar(f'loss/val/{val_task}', val_loss.item(), iteration)
writer.add_scalar(f'loss/{val_task}/val', val_loss.item(), iteration)
else:
log_entry = f'{args.timestamp}:{elapsed_time(logger)}:iteration_{iteration}:{round_progress}train_{task}:{task_progress}val_{val_task}:'
@@ -206,12 +206,8 @@ def train(args, model, opt, train_iters, train_iterations, field, rank=0, world_
logger.info(log_entry + metric_entry)
if writer is not None:
for metric_key, metric_value in metric_dict.items():
writer.add_scalar(f'val/{metric_key}/{val_task}', metric_value, iteration)
writer.add_scalar(f'{metric_key}/val/{val_task}', metric_value, iteration)
writer.add_scalar(f'{metric_key}/{val_task}/val', metric_value, iteration)
writer.add_scalar(f'{val_task}/{metric_key}/val', metric_value, iteration)
writer.add_scalar(f'{val_task}/val/{metric_key}', metric_value, iteration)
writer.add_scalar('val/deca', deca_score, iteration)
writer.add_scalar('deca/val', deca_score, iteration)
logger.info(f'{args.timestamp}:{elapsed_time(logger)}:iteration_{iteration}:{round_progress}train_{task}:{task_progress}val_deca:deca_{deca_score:.2f}')
@@ -278,13 +274,10 @@ def train(args, model, opt, train_iters, train_iterations, field, rank=0, world_
len_answers = 0
if writer is not None:
writer.add_scalar(f'loss/train/{task}', local_loss, iteration)
writer.add_scalar(f'loss/{task}/train', local_loss, iteration)
for metric_key, metric_value in local_train_metric_dict.items():
writer.add_scalar(f'train/{metric_key}/{task}', metric_value, iteration)
writer.add_scalar(f'{metric_key}/train/{task}', metric_value, iteration)
writer.add_scalar(f'{metric_key}/{task}/train', metric_value, iteration)
writer.add_scalar(f'{task}/{metric_key}/train', metric_value, iteration)
writer.add_scalar(f'{task}/train/{metric_key}', metric_value, iteration)
local_loss = 0
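
On the disk-space point: every add_scalar call appends another summary to the run's event files, so each redundant tag permutation is stored in full. Below is a minimal sketch of how one could list the scalar tags actually written to a run directory, assuming the tensorboard package's EventAccumulator and a placeholder log path:

    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    # Hypothetical log directory; point this at a real run to inspect it.
    acc = EventAccumulator('runs/example')
    acc.Reload()  # parse the event files on disk

    # Each entry here is a separately stored series, so redundant
    # permutations of the same value show up as extra tags (and extra bytes).
    for tag in sorted(acc.Tags()['scalars']):
        print(tag)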