from __future__ import absolute_import
from __future__ import unicode_literals

import random

from .gold import GoldParse, merge_sents
from .scorer import Scorer


class Trainer(object):
    '''Manage training of an NLP pipeline.'''

    def __init__(self, nlp, gold_tuples):
        self.nlp = nlp
        self.gold_tuples = gold_tuples

    def epochs(self, nr_epoch, augment_data=None, gold_preproc=False):
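        '''Iterate over the training data for `nr_epoch` passes, yielding one
        generator of (Doc, GoldParse) pairs per epoch. The example order is
        reshuffled before each pass. If `gold_preproc` is True, the
        gold-standard tokenization is used instead of re-tokenizing the raw
        text; `augment_data`, if given, is applied to each example before the
        Docs are built.'''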
        cached_golds = {}

        def _epoch(indices):
            for i in indices:
                raw_text, paragraph_tuples = self.gold_tuples[i]
                if gold_preproc:
                    raw_text = None
                else:
                    paragraph_tuples = merge_sents(paragraph_tuples)
                if augment_data is None:
                    docs = self.make_docs(raw_text, paragraph_tuples)
                    if i in cached_golds:
                        golds = cached_golds[i]
                    else:
                        golds = self.make_golds(docs, paragraph_tuples)
                        # Fill the cache so later epochs reuse the gold parses.
                        cached_golds[i] = golds
                else:
                    raw_text, paragraph_tuples = augment_data(raw_text, paragraph_tuples)
                    docs = self.make_docs(raw_text, paragraph_tuples)
                    golds = self.make_golds(docs, paragraph_tuples)
                for doc, gold in zip(docs, golds):
                    yield doc, gold

        indices = list(range(len(self.gold_tuples)))
        for itn in range(nr_epoch):
            # Shuffle the example order before each epoch.
            random.shuffle(indices)
            yield _epoch(indices)

    def update(self, doc, gold):
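        '''Train the pipeline on a single example: components that expose an
        `update` method learn from (doc, gold), and every component is then
        applied so later components see its annotations. Returns the Doc.'''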
        for process in self.nlp.pipeline:
            if hasattr(process, 'update'):
                process.update(doc, gold)
            process(doc)
        return doc

    def evaluate(self, dev_sents, gold_preproc=False):
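        '''Apply the full pipeline to the development data and score the
        predictions against the gold annotations, returning the Scorer.'''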
        scorer = Scorer()
        for raw_text, paragraph_tuples in dev_sents:
            if gold_preproc:
                raw_text = None
            else:
                paragraph_tuples = merge_sents(paragraph_tuples)
            docs = self.make_docs(raw_text, paragraph_tuples)
            golds = self.make_golds(docs, paragraph_tuples)
            for doc, gold in zip(docs, golds):
                for process in self.nlp.pipeline:
                    process(doc)
                scorer.score(doc, gold)
        return scorer

    def make_docs(self, raw_text, paragraph_tuples):
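        '''Build the Docs for one paragraph: tokenize `raw_text` when it is
        given, otherwise construct one Doc per sentence from the
        gold-standard word lists.'''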
        if raw_text is not None:
            return [self.nlp.tokenizer(raw_text)]
        else:
            return [self.nlp.tokenizer.tokens_from_list(sent_tuples[0][1])
                    for sent_tuples in paragraph_tuples]

    def make_golds(self, docs, paragraph_tuples):
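        '''Build a GoldParse for each Doc from the annotation tuples. A single
        Doc gets one GoldParse per sentence tuple; otherwise Docs and sentence
        tuples are paired one-to-one.'''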
        if len(docs) == 1:
            return [GoldParse.from_annot_tuples(docs[0], sent_tuples[0])
                    for sent_tuples in paragraph_tuples]
        else:
            return [GoldParse.from_annot_tuples(doc, sent_tuples[0])
                    for doc, sent_tuples in zip(docs, paragraph_tuples)]
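

# Usage sketch (illustrative only): assumes `nlp` is a loaded spaCy Language
# instance and `train_data`/`dev_data` are already in the
# (raw_text, paragraph_tuples) format this class expects.
#
#     trainer = Trainer(nlp, train_data)
#     for epoch in trainer.epochs(nr_epoch=10):
#         for doc, gold in epoch:
#             trainer.update(doc, gold)
#     scorer = trainer.evaluate(dev_data)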