Use averaged params for evaluation

Matthew Honnibal 2017-05-21 17:49:46 -05:00
parent 7811d97339
commit e14533757b
1 changed file with 2 additions and 1 deletion

@@ -53,7 +53,8 @@ def train(lang_id, output_dir, train_data, dev_data, n_iter, n_sents,
                 golds = list(golds)
                 nlp.update(docs, golds, drop=dropout, sgd=optimizer)
                 pbar.update(len(docs))
-        scorer = nlp.evaluate(corpus.dev_docs(nlp))
+        with nlp.use_params(optimizer.averages):
+            scorer = nlp.evaluate(corpus.dev_docs(nlp))
         print_progress(i, {}, scorer.scores)
     with (output_path / 'model.bin').open('wb') as file_:
         dill.dump(nlp, file_, -1)
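
For context: nlp.use_params is a context manager that temporarily swaps the model's live weights for the optimizer's running parameter averages, then restores the raw weights on exit, so evaluation sees the averaged (usually more stable) parameters without disturbing training. The following is a minimal sketch of that idea, not spaCy's actual internals; the flat weights dict and its layout are assumptions for illustration:

from contextlib import contextmanager

@contextmanager
def use_params(weights, averages):
    # Back up the live weights that have an averaged counterpart.
    # (Assumes a flat dict of numpy arrays; spaCy's real storage differs.)
    backup = {key: weights[key].copy() for key in averages}
    # Load the averaged weights for the duration of the block.
    weights.update(averages)
    try:
        yield
    finally:
        # Restore the raw weights so training continues unaffected.
        weights.update(backup)

Used as in the diff above: the evaluate call is wrapped in the context manager, so the scorer reflects the averaged weights while subsequent training iterations keep updating the raw ones.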