2015-01-09 17:53:26 +00:00
|
|
|
#!/usr/bin/env python
|
|
|
|
from __future__ import division
|
|
|
|
from __future__ import unicode_literals
|
2015-07-24 02:52:35 +00:00
|
|
|
from __future__ import print_function
|
2015-01-09 17:53:26 +00:00
|
|
|
|
|
|
|
import os
|
|
|
|
from os import path
|
|
|
|
import shutil
|
2015-09-30 18:16:52 +00:00
|
|
|
import io
|
2015-01-09 17:53:26 +00:00
|
|
|
import random
|
|
|
|
|
|
|
|
import plac
|
2015-05-23 15:21:25 +00:00
|
|
|
import re
|
2015-01-09 17:53:26 +00:00
|
|
|
|
|
|
|
import spacy.util
|
|
|
|
|
|
|
|
from spacy.syntax.util import Config
|
2015-05-24 19:35:02 +00:00
|
|
|
from spacy.gold import read_json_file
|
|
|
|
from spacy.gold import GoldParse
|
2015-01-09 17:53:26 +00:00
|
|
|
|
2015-03-10 17:00:23 +00:00
|
|
|
from spacy.scorer import Scorer
|
|
|
|
|
2015-09-06 15:51:48 +00:00
|
|
|
from spacy.syntax.arc_eager import ArcEager
|
|
|
|
from spacy.syntax.ner import BiluoPushDown
|
|
|
|
from spacy.tagger import Tagger
|
|
|
|
from spacy.syntax.parser import Parser
|
2016-03-03 14:21:00 +00:00
|
|
|
from spacy.syntax.nonproj import PseudoProjectivity
|
2015-09-06 15:51:48 +00:00
|
|
|
|
2015-01-09 17:53:26 +00:00
|
|
|
|
2015-06-05 17:33:32 +00:00
|
|
|
def _corrupt(c, noise_level):
|
2015-05-24 00:50:14 +00:00
|
|
|
if random.random() >= noise_level:
|
|
|
|
return c
|
|
|
|
elif c == ' ':
|
|
|
|
return '\n'
|
|
|
|
elif c == '\n':
|
|
|
|
return ' '
|
|
|
|
elif c in ['.', "'", "!", "?"]:
|
|
|
|
return ''
|
|
|
|
else:
|
|
|
|
return c.lower()
|
|
|
|
|
|
|
|
|
2015-06-05 17:33:32 +00:00
|
|
|
def add_noise(orig, noise_level):
    """Corrupt *orig* (a string or a list of words) with probability
    ``noise_level``, delegating per-item corruption to ``_corrupt``.

    Corrupted list entries that become empty strings are dropped.
    """
    if random.random() >= noise_level:
        return orig
    if type(orig) == list:
        noisy = [_corrupt(word, noise_level) for word in orig]
        # Drop words that corruption reduced to the empty string.
        return [word for word in noisy if word]
    return ''.join(_corrupt(c, noise_level) for c in orig)
|
|
|
|
|
|
|
|
|
2015-06-14 15:45:31 +00:00
|
|
|
def score_model(scorer, nlp, raw_text, annot_tuples, verbose=False):
    """Process one example with *nlp* and fold its scores into *scorer*.

    When *raw_text* is None the Doc is built from the gold-standard word
    list in ``annot_tuples[1]``; otherwise the raw text is tokenized.
    """
    if raw_text is None:
        # Gold tokenization: build the Doc from the annotated word list.
        doc = nlp.tokenizer.tokens_from_list(annot_tuples[1])
    else:
        doc = nlp.tokenizer(raw_text)
    # Run pipeline components in sequence on the tokenized Doc.
    nlp.tagger(doc)
    nlp.entity(doc)
    nlp.parser(doc)
    scorer.score(doc, GoldParse(doc, annot_tuples), verbose=verbose)
|
2015-05-27 17:14:02 +00:00
|
|
|
|
|
|
|
|
2015-05-30 03:23:02 +00:00
|
|
|
def _merge_sents(sents):
|
|
|
|
m_deps = [[], [], [], [], [], []]
|
|
|
|
m_brackets = []
|
|
|
|
i = 0
|
|
|
|
for (ids, words, tags, heads, labels, ner), brackets in sents:
|
|
|
|
m_deps[0].extend(id_ + i for id_ in ids)
|
|
|
|
m_deps[1].extend(words)
|
|
|
|
m_deps[2].extend(tags)
|
|
|
|
m_deps[3].extend(head + i for head in heads)
|
|
|
|
m_deps[4].extend(labels)
|
|
|
|
m_deps[5].extend(ner)
|
|
|
|
m_brackets.extend((b['first'] + i, b['last'] + i, b['label']) for b in brackets)
|
|
|
|
i += len(ids)
|
|
|
|
return [(m_deps, m_brackets)]
|
|
|
|
|
2015-05-30 23:11:11 +00:00
|
|
|
|
2016-10-09 10:24:24 +00:00
|
|
|
def train(Language, train_data, dev_data, model_dir, tagger_cfg, parser_cfg, entity_cfg,
        n_iter=15, seed=0, gold_preproc=False, n_sents=0, corruption_level=0):
    """Train the tagger, parser and NER models, printing a row of dev-set
    scores after every training iteration.

    ``seed``, ``gold_preproc``, ``n_sents`` and ``corruption_level`` are
    accepted for interface compatibility but not read in this body.
    """
    print("Itn.\tP.Loss\tUAS\tNER F.\tTag %\tToken %")
    row_template = '{:d}\t{:d}\t{uas:.3f}\t{ents_f:.3f}\t{tags_acc:.3f}\t{token_acc:.3f}'
    with Language.train(model_dir, train_data,
                        tagger_cfg, parser_cfg, entity_cfg) as trainer:
        # NOTE(review): loss is never accumulated, so the P.Loss column
        # always prints 0 -- confirm whether trainer.update() should feed it.
        loss = 0
        for iteration, examples in enumerate(trainer.epochs(n_iter, augment_data=None)):
            for doc, gold in examples:
                trainer.update(doc, gold)
            dev_scores = trainer.evaluate(dev_data)
            print(row_template.format(iteration, loss, **dev_scores.scores))
|
2015-01-09 17:53:26 +00:00
|
|
|
|
2015-10-05 23:35:22 +00:00
|
|
|
|
2015-06-05 21:49:26 +00:00
|
|
|
def evaluate(Language, gold_tuples, model_dir, gold_preproc=False, verbose=False,
        beam_width=None, cand_preproc=None):
    """Load a saved model from *model_dir*, score it on *gold_tuples* and
    return the populated Scorer.

    With ``gold_preproc`` the gold tokenization and sentence boundaries
    are used; otherwise sentences are merged and raw text is parsed.
    """
    nlp = Language(path=model_dir)
    if nlp.lang == 'de':
        # German-specific override: treat every surface form as its own lemma.
        nlp.vocab.morphology.lemmatizer = lambda string, pos: {string}
    if beam_width is not None:
        nlp.parser.cfg.beam_width = beam_width
    scorer = Scorer()
    for raw_text, sents in gold_tuples:
        if gold_preproc:
            raw_text = None
        else:
            sents = _merge_sents(sents)
        for annot_tuples, brackets in sents:
            if raw_text is None:
                # Gold tokenization: run each pipeline component by hand.
                doc = nlp.tokenizer.tokens_from_list(annot_tuples[1])
                nlp.tagger(doc)
                nlp.parser(doc)
                nlp.entity(doc)
            else:
                doc = nlp(raw_text)
            scorer.score(doc, GoldParse(doc, annot_tuples), verbose=verbose)
    return scorer
|
2015-03-08 05:17:12 +00:00
|
|
|
|
|
|
|
|
2015-10-05 23:35:22 +00:00
|
|
|
def write_parses(Language, dev_loc, model_dir, out_loc):
    """Parse the dev set with a saved model and write tab-separated rows to
    *out_loc*.

    Each non-whitespace token is written as
    ``index\\tword\\ttag\\thead-word\\tdep``, with a blank line between
    sentences.
    """
    nlp = Language(data_dir=model_dir)
    gold_tuples = read_json_file(dev_loc)
    # BUG FIX: io.open()'s third positional parameter is *buffering* (an int);
    # the original io.open(out_loc, 'w', 'utf8') raised TypeError.  The
    # encoding must be passed by keyword, and the file handle is now managed
    # by a with-block so it is always closed.
    with io.open(out_loc, 'w', encoding='utf8') as out_file:
        for raw_text, sents in gold_tuples:
            sents = _merge_sents(sents)
            for annot_tuples, brackets in sents:
                if raw_text is None:
                    # Gold tokenization: run each pipeline component by hand.
                    tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
                    nlp.tagger(tokens)
                    nlp.entity(tokens)
                    nlp.parser(tokens)
                else:
                    tokens = nlp(raw_text)
                for sent in tokens.sents:
                    for t in sent:
                        if not t.is_space:
                            out_file.write(
                                '%d\t%s\t%s\t%s\t%s\n' % (t.i, t.orth_, t.tag_, t.head.orth_, t.dep_)
                            )
                    out_file.write('\n')
|
2015-03-20 00:14:20 +00:00
|
|
|
|
|
|
|
|
2015-02-23 19:05:04 +00:00
|
|
|
@plac.annotations(
    language=("The language to train", "positional", None, str, ['en','de', 'zh']),
    train_loc=("Location of training file or directory"),
    dev_loc=("Location of development file or directory"),
    model_dir=("Location of output model directory",),
    eval_only=("Skip training, and only evaluate", "flag", "e", bool),
    corruption_level=("Amount of noise to add to training data", "option", "c", float),
    gold_preproc=("Use gold-standard sentence boundaries in training?", "flag", "g", bool),
    out_loc=("Out location", "option", "o", str),
    n_sents=("Number of training sentences", "option", "n", int),
    n_iter=("Number of training iterations", "option", "i", int),
    verbose=("Verbose error reporting", "flag", "v", bool),
    debug=("Debug mode", "flag", "d", bool),
    pseudoprojective=("Use pseudo-projective parsing", "flag", "p", bool),
)
def main(language, train_loc, dev_loc, model_dir, n_sents=0, n_iter=15, out_loc="", verbose=False,
        debug=False, corruption_level=0.0, gold_preproc=False, eval_only=False, pseudoprojective=False):
    """CLI entry point: optionally train, then evaluate (and optionally dump
    parses for) a model in *model_dir*."""
    # Each component config is a snapshot of every CLI argument.  Because
    # dict(locals()) captures names, renaming any local in this function would
    # silently change the config keys.  Note the later snapshots also pick up
    # the earlier *_cfg variables themselves (e.g. tagger_cfg contains a
    # 'parser_cfg' key) -- presumably harmless, but worth confirming.
    parser_cfg = dict(locals())
    tagger_cfg = dict(locals())
    entity_cfg = dict(locals())
    # Resolve the Language subclass for the requested language code.
    lang = spacy.util.get_lang_class(language)
    parser_cfg['features'] = lang.Defaults.parser_features
    entity_cfg['features'] = lang.Defaults.entity_features
    if not eval_only:
        gold_train = list(read_json_file(train_loc))
        gold_dev = list(read_json_file(dev_loc))
        train(lang, gold_train, gold_dev, model_dir, tagger_cfg, parser_cfg, entity_cfg,
            n_sents=n_sents, gold_preproc=gold_preproc, corruption_level=corruption_level,
            n_iter=n_iter)
    if out_loc:
        # Dump per-token parses of the dev set to the requested path.
        write_parses(lang, dev_loc, model_dir, out_loc)
    scorer = evaluate(lang, list(read_json_file(dev_loc)),
        model_dir, gold_preproc=gold_preproc, verbose=verbose)
    # Final summary of dev-set accuracy figures.
    print('TOK', scorer.token_acc)
    print('POS', scorer.tags_acc)
    print('UAS', scorer.uas)
    print('LAS', scorer.las)
    print('NER P', scorer.ents_p)
    print('NER R', scorer.ents_r)
    print('NER F', scorer.ents_f)
|
2015-04-19 08:31:31 +00:00
|
|
|
|
2015-01-09 17:53:26 +00:00
|
|
|
|
|
|
|
if __name__ == '__main__':
    # plac maps the annotated main() signature onto command-line arguments.
    plac.call(main)
|