genienlp/predict.py

#!/usr/bin/env python3
import os
from text.torchtext.datasets.generic import Query
from text import torchtext
from argparse import ArgumentParser
import ujson as json
import torch
import numpy as np
import random
from pprint import pformat
from util import get_splits, set_seed, preprocess_examples, tokenizer
from metrics import compute_metrics
import models
from text.torchtext.data.utils import get_tokenizer
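
# Load the requested splits for every task; splits not named in --evaluate
# are skipped by passing None to get_splits.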
def get_all_splits(args, new_vocab):
    splits = []
    for task in args.tasks:
        print(f'Loading {task}')
        kwargs = {}
        if 'train' not in args.evaluate:
            kwargs['train'] = None
        if 'valid' not in args.evaluate:
            kwargs['validation'] = None
        if 'test' not in args.evaluate:
            kwargs['test'] = None
        s = get_splits(args, task, new_vocab, **kwargs)[0]
        preprocess_examples(args, [task], [s], new_vocab, train=False)
        splits.append(s)
    return splits
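
# Build an evaluation vocabulary over all requested splits, merge it into the
# training-time field, reload the matching CharNGram and GloVe vectors, and
# rebuild the decoder<->vocab index maps.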
def prepare_data(args, FIELD):
    new_vocab = torchtext.data.ReversibleField(batch_first=True, init_token='<init>', eos_token='<eos>', lower=args.lower, include_lengths=True)
    splits = get_all_splits(args, new_vocab)
    new_vocab.build_vocab(*splits)
    print(f'Vocabulary has {len(FIELD.vocab)} tokens from training')
    args.max_generative_vocab = min(len(FIELD.vocab), args.max_generative_vocab)
    FIELD.append_vocab(new_vocab)
    print(f'Vocabulary has expanded to {len(FIELD.vocab)} tokens')

    char_vectors = torchtext.vocab.CharNGram(cache=args.embeddings)
    glove_vectors = torchtext.vocab.GloVe(cache=args.embeddings)
    vectors = [char_vectors, glove_vectors]
    FIELD.vocab.load_vectors(vectors, True)
    FIELD.decoder_to_vocab = {idx: FIELD.vocab.stoi[word] for idx, word in enumerate(FIELD.decoder_itos)}
    FIELD.vocab_to_decoder = {idx: FIELD.decoder_stoi[word] for idx, word in enumerate(FIELD.vocab.itos) if word in FIELD.decoder_stoi}
    splits = get_all_splits(args, FIELD)
    return FIELD, splits
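
# Wrap a dataset in a non-shuffling, non-repeating torchtext Iterator for
# evaluation.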
def to_iter(data, bs, device):
    Iterator = torchtext.data.Iterator
    it = Iterator(data, batch_size=bs,
                  device=device, batch_size_fn=None,
                  train=False, repeat=False, sort=None,
                  shuffle=None, reverse=False)
    return it
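
# Evaluate the model on each task's chosen split, writing predictions, gold
# answers, and metrics to disk, and accumulate the per-task decaScore.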
def run(args, field, val_sets, model):
    device = set_seed(args)
    print('Preparing iterators')
    if len(args.val_batch_size) == 1 and len(val_sets) > 1:
        args.val_batch_size *= len(val_sets)
    iters = [(name, to_iter(x, bs, device)) for name, x, bs in zip(args.tasks, val_sets, args.val_batch_size)]
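
    # Count trainable parameters: the sum over gradient-bearing tensors of
    # the product of their dimensions.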
    def mult(ps):
        r = 0
        for p in ps:
            this_r = 1
            for s in p.size():
                this_r *= s
            r += this_r
        return r

    params = list(filter(lambda p: p.requires_grad, model.parameters()))
    num_param = mult(params)
    print(f'{args.model} has {num_param:,} parameters')
    model.to(device)

    decaScore = []
    model.eval()
    with torch.no_grad():
        for task, it in iters:
            print(task)
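
            # Each task writes three files under a directory derived from the
            # checkpoint name: predictions (<task>.txt), gold answers
            # (<task>.gold.txt), and metrics (<task>.results.txt).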
            if args.eval_dir:
                prediction_file_name = os.path.join(args.eval_dir, os.path.splitext(args.best_checkpoint)[0], args.evaluate, task + '.txt')
                answer_file_name = os.path.join(args.eval_dir, os.path.splitext(args.best_checkpoint)[0], args.evaluate, task + '.gold.txt')
            else:
                prediction_file_name = os.path.join(os.path.splitext(args.best_checkpoint)[0], args.evaluate, task + '.txt')
                answer_file_name = os.path.join(os.path.splitext(args.best_checkpoint)[0], args.evaluate, task + '.gold.txt')
            results_file_name = answer_file_name.replace('gold', 'results')
            if 'sql' in task or 'squad' in task:
                ids_file_name = answer_file_name.replace('gold', 'ids')
            if os.path.exists(prediction_file_name):
                print('** ', prediction_file_name, ' already exists -- this is where predictions are stored **')
                if args.overwrite:
                    print('**** overwriting ', prediction_file_name, ' ****')
            if os.path.exists(answer_file_name):
                print('** ', answer_file_name, ' already exists -- this is where ground truth answers are stored **')
                if args.overwrite:
                    print('**** overwriting ', answer_file_name, ' ****')
            if os.path.exists(results_file_name):
                print('** ', results_file_name, ' already exists -- this is where metrics are stored **')
                if args.overwrite:
                    print('**** overwriting ', results_file_name, ' ****')
                else:
                    # Read the file once; iterating it and then calling
                    # readlines() again would return an empty list.
                    with open(results_file_name) as results_file:
                        results_lines = results_file.readlines()
                    if not args.silent:
                        for l in results_lines:
                            print(l)
                    metrics = json.loads(results_lines[0])
                    decaScore.append(metrics[args.task_to_metric[task]])
                    continue
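
            # Ensure the output directories exist, then either run the model
            # or reuse a previously written prediction file.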
            for x in [prediction_file_name, answer_file_name, results_file_name]:
                os.makedirs(os.path.dirname(x), exist_ok=True)

            if not os.path.exists(prediction_file_name) or args.overwrite:
                with open(prediction_file_name, 'w') as prediction_file:
                    predictions = []
                    ids = []
                    for batch_idx, batch in enumerate(it):
                        _, p = model(batch, iteration=1)
                        if task == 'almond':
                            p = field.reverse(p, detokenize=lambda x: ' '.join(x))
                        else:
                            p = field.reverse(p)
                        for i, pp in enumerate(p):
                            if 'sql' in task:
                                ids.append(int(batch.wikisql_id[i]))
                            if 'squad' in task:
                                ids.append(it.dataset.q_ids[int(batch.squad_id[i])])
                            prediction_file.write(json.dumps(pp) + '\n')
                            predictions.append(pp)
                if 'sql' in task:
                    with open(ids_file_name, 'w') as id_file:
                        for i in ids:
                            id_file.write(json.dumps(i) + '\n')
                if 'squad' in task:
                    with open(ids_file_name, 'w') as id_file:
                        for i in ids:
                            id_file.write(i + '\n')
            else:
                with open(prediction_file_name) as prediction_file:
                    # Predictions were serialized with json.dumps above, so
                    # decode them the same way here.
                    predictions = [json.loads(x.strip()) for x in prediction_file.readlines()]
                if 'sql' in task or 'squad' in task:
                    with open(ids_file_name) as id_file:
                        # WikiSQL ids were written as JSON integers; SQuAD ids
                        # are raw strings and must not be cast to int.
                        if 'sql' in task:
                            ids = [int(x.strip()) for x in id_file.readlines()]
                        else:
                            ids = [x.strip() for x in id_file.readlines()]
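
            # Gold answers: tasks with stored ids look them up in
            # it.dataset.all_answers; otherwise the answer tensor is
            # detokenized directly.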
            def from_all_answers(an):
                return [it.dataset.all_answers[sid] for sid in an.tolist()]

            if not os.path.exists(answer_file_name) or args.overwrite:
                with open(answer_file_name, 'w') as answer_file:
                    answers = []
                    for batch_idx, batch in enumerate(it):
                        if hasattr(batch, 'wikisql_id'):
                            a = from_all_answers(batch.wikisql_id.data.cpu())
                        elif hasattr(batch, 'squad_id'):
                            a = from_all_answers(batch.squad_id.data.cpu())
                        elif hasattr(batch, 'woz_id'):
                            a = from_all_answers(batch.woz_id.data.cpu())
                        else:
                            if task == 'almond':
                                # Temporarily switch the field to the almond
                                # tokenizer so gold answers are detokenized the
                                # same way as the predictions.
                                setattr(field, 'use_revtok', False)
                                setattr(field, 'tokenize', tokenizer)
                                a = field.reverse_almond(batch.answer.data)
                                setattr(field, 'use_revtok', True)
                                setattr(field, 'tokenize', 'revtok')
                            else:
                                a = field.reverse(batch.answer.data)
                        for aa in a:
                            answers.append(aa)
                            answer_file.write(json.dumps(aa) + '\n')
            else:
                with open(answer_file_name) as answer_file:
                    answers = [json.loads(x.strip()) for x in answer_file.readlines()]

            if len(answers) > 0:
                if not os.path.exists(results_file_name) or args.overwrite:
                    metrics, answers = compute_metrics(predictions, answers,
                        bleu='iwslt' in task or 'multi30k' in task or 'almond' in task,
                        dialogue='woz' in task,
                        rouge='cnn' in task, logical_form='sql' in task, corpus_f1='zre' in task,
                        func_accuracy='almond' in task and not args.reverse_task_bool,
                        dev_accuracy='almond' in task and not args.reverse_task_bool,
                        args=args)
                    with open(results_file_name, 'w') as results_file:
                        results_file.write(json.dumps(metrics) + '\n')
                else:
                    with open(results_file_name) as results_file:
                        metrics = json.loads(results_file.readlines()[0])

                if not args.silent:
                    for i, (p, a) in enumerate(zip(predictions, answers)):
                        print(f'Prediction {i+1}: {p}\nAnswer {i+1}: {a}\n')
                print(metrics)
                decaScore.append(metrics[args.task_to_metric[task]])

    print('Evaluated Tasks:\n')
    for i, (task, _) in enumerate(iters):
        print(f'{task}: {decaScore[i]}')
    print('-------------------')
    print(f'DecaScore: {sum(decaScore)}\n')
    print(f'\nSummary: | {sum(decaScore)} | {" | ".join([str(x) for x in decaScore])} |\n')
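
# Build the argument namespace: command-line flags plus model hyperparameters
# recovered from the training-time config.json.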
def get_args():
    parser = ArgumentParser()
    parser.add_argument('--path', required=True)
    parser.add_argument('--evaluate', type=str, required=True)
    parser.add_argument('--tasks', default=['almond', 'squad', 'iwslt.en.de', 'cnn_dailymail', 'multinli.in.out', 'sst', 'srl', 'zre', 'woz.en', 'wikisql', 'schema'], nargs='+')
    parser.add_argument('--devices', default=[0], nargs='+', type=int, help='a list of devices that can be used (multi-gpu currently WIP)')
    parser.add_argument('--seed', default=123, type=int, help='Random seed.')
    parser.add_argument('--data', default='./decaNLP/.data/', type=str, help='where to load data from.')
    parser.add_argument('--embeddings', default='./decaNLP/.embeddings', type=str, help='where to save embeddings.')
    parser.add_argument('--checkpoint_name')
    parser.add_argument('--bleu', action='store_true', help='whether to use the bleu metric (always on for iwslt)')
    parser.add_argument('--rouge', action='store_true', help='whether to use the rouge metric (always on for cnn, dailymail, and cnn_dailymail)')
    parser.add_argument('--overwrite', action='store_true', help='whether to overwrite previously written predictions')
    parser.add_argument('--silent', action='store_true', help='whether to suppress printing predictions to stdout')
    parser.add_argument('--skip_cache', action='store_true', dest='skip_cache_bool', help='whether to skip existing cached splits and generate new ones')
    parser.add_argument('--reverse_task', action='store_true', dest='reverse_task_bool', help='whether to translate english to code or the other way around')
    parser.add_argument('--eval_dir', type=str, default=None, help='use this directory to store eval results')

    args = parser.parse_args()

    with open(os.path.join(args.path, 'config.json')) as config_file:
        config = json.load(config_file)
    retrieve = ['model',
                'transformer_layers', 'rnn_layers', 'transformer_hidden',
                'dimension', 'load', 'max_val_context_length', 'val_batch_size',
                'transformer_heads', 'max_output_length', 'max_generative_vocab',
                'lower', 'cove', 'intermediate_cove', 'elmo', 'glove_and_char']
    for r in retrieve:
        if r in config:
            setattr(args, r, config[r])
        elif 'cove' in r:
            setattr(args, r, False)
        elif 'elmo' in r:
            setattr(args, r, [-1])
        elif 'glove_and_char' in r:
            setattr(args, r, True)
        else:
            setattr(args, r, None)
    args.dropout_ratio = 0.0

    args.task_to_metric = {
        'cnn_dailymail': 'avg_rouge',
        'iwslt.en.de': 'bleu',
        'multinli.in.out': 'em',
        'squad': 'nf1',
        'srl': 'nf1',
        'almond': 'bleu' if args.reverse_task_bool else 'em',
        'sst': 'em',
        'wikisql': 'lfem',
        'woz.en': 'joint_goal_em',
        'zre': 'corpus_f1',
        'schema': 'em',
    }

    if args.checkpoint_name is not None:
        args.best_checkpoint = os.path.join(args.path, args.checkpoint_name)
    else:
        assert os.path.exists(os.path.join(args.path, 'process_0.log'))
        args.best_checkpoint = get_best(args)

    return args
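
# Scan process_0.log for validation lines, sum each iteration's per-task
# validation metrics into a decaScore, and return the checkpoint of the best
# iteration.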
def get_best(args):
    with open(os.path.join(args.path, 'config.json')) as f:
        save_every = json.load(f)['save_every']
    with open(os.path.join(args.path, 'process_0.log')) as f:
        lines = f.readlines()

    best_score = 0
    best_it = 10
    deca_scores = {}
    for l in lines:
        if 'val' in l:
            try:
                task = l.split('val_')[1].split(':')[0]
            except Exception as e:
                print(e)
                continue
            it = int(l.split('iteration_')[1].split(':')[0])
            metric = args.task_to_metric[task]
            score = float(l.split(metric + '_')[1].split(':')[0])
            if it in deca_scores:
                deca_scores[it]['deca'] += score
                deca_scores[it][metric] = score
            else:
                deca_scores[it] = {'deca': score, metric: score}
            if deca_scores[it]['deca'] > best_score:
                best_score = deca_scores[it]['deca']
                best_it = it

    print(best_it)
    print(best_score)
    return os.path.join(args.path, f'iteration_{int(best_it)}.pth')
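
# Entry point: seed all RNGs, load the checkpoint (on CPU if CUDA is
# unavailable), rebuild the model, and run evaluation.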
if __name__ == '__main__':
    args = get_args()
    print(f'Arguments:\n{pformat(vars(args))}')

    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    print(f'Loading from {args.best_checkpoint}')
    if torch.cuda.is_available():
        save_dict = torch.load(args.best_checkpoint)
    else:
        save_dict = torch.load(args.best_checkpoint, map_location='cpu')

    field = save_dict['field']
    print('Initializing Model')
    Model = getattr(models, args.model)
    model = Model(field, args)
    model_dict = save_dict['model_state_dict']
    # Rename CoVe parameters saved under the old 'cove.rnn.' prefix so that
    # checkpoints from older code load into the current 'cove.rnn1.' module.
    backwards_compatible_cove_dict = {}
    for k, v in model_dict.items():
        if 'cove.rnn.' in k:
            k = k.replace('cove.rnn.', 'cove.rnn1.')
        backwards_compatible_cove_dict[k] = v
    model_dict = backwards_compatible_cove_dict
    model.load_state_dict(model_dict)

    field, splits = prepare_data(args, field)
    model.set_embeddings(field.vocab.vectors)
    run(args, field, splits, model)