# coding: utf8
from __future__ import absolute_import, unicode_literals

import random
import tqdm

from thinc.neural.optimizers import Adam
from thinc.neural.ops import NumpyOps, CupyOps

from .gold import GoldParse, merge_sents
from .scorer import Scorer
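
# Training data arrives in spaCy's "gold tuples" format: a list of
# (raw_text, paragraph_tuples) pairs, where paragraph_tuples is a list of
# (annot_tuples, brackets) entries and annot_tuples packs the per-token
# annotations as (ids, words, tags, heads, dep_labels, ner). An illustrative
# example (values made up for this sketch, not from the original source):
#
#     gold_tuples = [
#         ("Flights to Berlin.", [
#             (([0, 1, 2, 3],
#               ["Flights", "to", "Berlin", "."],
#               ["NNS", "IN", "NNP", "."],
#               [0, 0, 1, 0],
#               ["ROOT", "prep", "pobj", "punct"],
#               ["O", "O", "U-GPE", "O"]), []),
#         ]),
#     ]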


class Trainer(object):
    """
    Manage training of an NLP pipeline.
    """
    def __init__(self, nlp, gold_tuples):
        self.nlp = nlp
        self.gold_tuples = gold_tuples
        self.nr_epoch = 0
        # update() below relies on self.sgd, which the original never set.
        # Assumed initialisation: a thinc Adam optimizer on the CPU backend
        # (swap NumpyOps for CupyOps to train on GPU).
        self.sgd = Adam(NumpyOps(), 0.001)
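
    # gold_preproc=True trains from the gold-standard tokenization and
    # discards the raw text; gold_preproc=False merges each paragraph's
    # sentences and re-tokenizes the raw text, so tokenizer errors propagate
    # just as they would at runtime.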
    def epochs(self, nr_epoch, augment_data=None, gold_preproc=False):
        cached_golds = {}
        def _epoch(indices):
            for i in tqdm.tqdm(indices):
                raw_text, paragraph_tuples = self.gold_tuples[i]
                if gold_preproc:
                    raw_text = None
                else:
                    paragraph_tuples = merge_sents(paragraph_tuples)
                if augment_data is None:
                    docs = self.make_docs(raw_text, paragraph_tuples)
                    if i in cached_golds:
                        golds = cached_golds[i]
                    else:
                        golds = self.make_golds(docs, paragraph_tuples)
                        # The original computed the golds but never stored
                        # them, so the cache above could never hit; remember
                        # them for later epochs.
                        cached_golds[i] = golds
                else:
                    raw_text, paragraph_tuples = augment_data(raw_text, paragraph_tuples)
                    docs = self.make_docs(raw_text, paragraph_tuples)
                    golds = self.make_golds(docs, paragraph_tuples)
                for doc, gold in zip(docs, golds):
                    yield doc, gold

        indices = list(range(len(self.gold_tuples)))
        for itn in range(nr_epoch):
            random.shuffle(indices)
            yield _epoch(indices)
            self.nr_epoch += 1

    def update(self, doc, gold, drop=0.):
        # The original signature took (docs, golds) in the plural, but the
        # body operated on a single doc/gold pair and returned the doc, so
        # the parameters are renamed to match.
        for process in self.nlp.pipeline:
            if hasattr(process, 'update'):
                loss = process.update(doc, gold, sgd=self.sgd, drop=drop,
                                      itn=self.nr_epoch)
                self.sgd.finish_update()
            else:
                process(doc)
        return doc
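
    # Sketch of the duck-typed contract update() relies on (a hypothetical
    # component, not from the original source): everything in nlp.pipeline
    # either exposes an update() method and gets trained, or is simply
    # called on the doc.
    #
    #     class DummyTagger(object):
    #         def update(self, doc, gold, sgd=None, drop=0., itn=0):
    #             return 0.0  # loss
    #         def __call__(self, doc):
    #             return doc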

    def evaluate(self, dev_sents, gold_preproc=False):
        scorer = Scorer()
        for raw_text, paragraph_tuples in dev_sents:
            if gold_preproc:
                raw_text = None
            else:
                paragraph_tuples = merge_sents(paragraph_tuples)
            docs = self.make_docs(raw_text, paragraph_tuples)
            golds = self.make_golds(docs, paragraph_tuples)
            for doc, gold in zip(docs, golds):
                for process in self.nlp.pipeline:
                    process(doc)
                scorer.score(doc, gold)
        return scorer
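
    # The Scorer returned by evaluate() exposes accuracy figures such as
    # scorer.uas, scorer.las, scorer.tags_acc and scorer.ents_f (see the
    # .scorer module for the full set); callers typically print these after
    # each epoch.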

    def make_docs(self, raw_text, paragraph_tuples):
        if raw_text is not None:
            return [self.nlp.tokenizer(raw_text)]
        else:
            return [self.nlp.tokenizer.tokens_from_list(sent_tuples[0][1])
                    for sent_tuples in paragraph_tuples]

    def make_golds(self, docs, paragraph_tuples):
        if len(docs) == 1:
            return [GoldParse.from_annot_tuples(docs[0], sent_tuples[0])
                    for sent_tuples in paragraph_tuples]
        else:
            return [GoldParse.from_annot_tuples(doc, sent_tuples[0])
                    for doc, sent_tuples in zip(docs, paragraph_tuples)]
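

# A minimal usage sketch, not part of the original module. It assumes `nlp`
# is a loaded pipeline whose components follow the update()/__call__ contract
# noted above, and that train_data and dev_data are lists of (raw_text,
# paragraph_tuples) pairs in the gold-tuples format illustrated near the
# imports.
def train(nlp, train_data, dev_data, nr_epoch=10, drop=0.2):
    trainer = Trainer(nlp, train_data)
    for epoch in trainer.epochs(nr_epoch):
        # Each epoch is a generator over shuffled (doc, gold) pairs.
        for doc, gold in epoch:
            trainer.update(doc, gold, drop=drop)
    # Score the held-out data with the freshly trained pipeline.
    return trainer.evaluate(dev_data)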