2015-01-09 17:53:26 +00:00
|
|
|
#!/usr/bin/env python
|
|
|
|
from __future__ import division
|
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
|
|
|
import os
|
|
|
|
from os import path
|
|
|
|
import shutil
|
|
|
|
import codecs
|
|
|
|
import random
|
|
|
|
import time
|
|
|
|
import gzip
|
|
|
|
|
|
|
|
import plac
|
|
|
|
import cProfile
|
|
|
|
import pstats
|
|
|
|
|
|
|
|
import spacy.util
|
|
|
|
from spacy.en import English
|
|
|
|
from spacy.en.pos import POS_TEMPLATES, POS_TAGS, setup_model_dir
|
|
|
|
|
|
|
|
from spacy.syntax.parser import GreedyParser
|
2015-02-02 12:02:48 +00:00
|
|
|
from spacy.syntax.parser import OracleError
|
2015-01-09 17:53:26 +00:00
|
|
|
from spacy.syntax.util import Config
|
2015-03-09 05:46:53 +00:00
|
|
|
from spacy.syntax.conll import read_docparse_file
|
|
|
|
from spacy.syntax.conll import GoldParse
|
2015-01-09 17:53:26 +00:00
|
|
|
|
2015-03-10 17:00:23 +00:00
|
|
|
from spacy.scorer import Scorer
|
|
|
|
|
2015-01-09 17:53:26 +00:00
|
|
|
|
2015-01-30 05:36:24 +00:00
|
|
|
def is_punct_label(label):
    """True if *label* marks a punctuation dependency ('P' or any casing of 'punct')."""
    if label == 'P':
        return True
    return label.lower() == 'punct'
|
|
|
|
|
|
|
|
|
2015-01-09 17:53:26 +00:00
|
|
|
def read_tokenized_gold(file_):
    """Read a standard CoNLL/MALT-style format.

    Sentences are blank-line separated, one token per line.  Returns a list
    of tuples: (text, [words], ids, words, tags, heads, labels).
    """
    annotated = []
    for sent_str in file_.read().strip().split('\n\n'):
        ids, words, tags, heads, labels = [], [], [], [], []
        for i, line in enumerate(sent_str.split('\n')):
            id_, word, tag, head_idx, dep = _parse_line(line)
            # A head of -1 marks a root; point the token at itself.
            if head_idx == -1:
                head_idx = i
            ids.append(id_)
            words.append(word)
            tags.append(tag)
            heads.append(head_idx)
            labels.append(dep)
        annotated.append((' '.join(words), [words], ids, words, tags, heads, labels))
    return annotated
|
|
|
|
|
|
|
|
|
|
|
|
def read_docparse_gold(file_):
    """Read a docparse-format file.

    Each paragraph block starts with a raw-text line, then a tokenized-text
    line ('<SENT>' separates sentences, '<SEP>' separates tokens within a
    sentence), followed by one annotation line per token.  Returns a list of
    tuples: (raw_text, tokenized, ids, words, tags, heads, labels).
    """
    paragraphs = []
    for block in file_.read().strip().split('\n\n'):
        if not block.strip():
            continue
        ids, words, tags, heads, labels = [], [], [], [], []
        lines = block.strip().split('\n')
        raw_text = lines.pop(0).strip()
        tok_text = lines.pop(0).strip()
        for i, line in enumerate(lines):
            id_, word, tag, head_idx, label = _parse_line(line)
            # Normalize the root label spelling.
            if label == 'root':
                label = 'ROOT'
            # Negative heads mark roots; point the token at itself.
            if head_idx < 0:
                head_idx = id_
            ids.append(id_)
            words.append(word)
            tags.append(tag)
            heads.append(head_idx)
            labels.append(label)
        tokenized = [sent.replace('<SEP>', ' ').split(' ')
                     for sent in tok_text.split('<SENT>')]
        paragraphs.append((raw_text, tokenized, ids, words, tags, heads, labels))
    return paragraphs
|
|
|
|
|
2015-01-09 17:53:26 +00:00
|
|
|
|
2015-01-28 17:21:13 +00:00
|
|
|
def _map_indices_to_tokens(ids, heads):
|
2015-01-29 23:31:03 +00:00
|
|
|
mapped = []
|
|
|
|
for head in heads:
|
|
|
|
if head not in ids:
|
|
|
|
mapped.append(None)
|
|
|
|
else:
|
|
|
|
mapped.append(ids.index(head))
|
|
|
|
return mapped
|
2015-01-28 17:21:13 +00:00
|
|
|
|
|
|
|
|
2015-01-09 17:53:26 +00:00
|
|
|
def _parse_line(line):
|
|
|
|
pieces = line.split()
|
|
|
|
if len(pieces) == 4:
|
2015-01-28 17:21:13 +00:00
|
|
|
return 0, pieces[0], pieces[1], int(pieces[2]) - 1, pieces[3]
|
2015-01-09 17:53:26 +00:00
|
|
|
else:
|
2015-01-28 17:21:13 +00:00
|
|
|
id_ = int(pieces[0])
|
2015-01-09 17:53:26 +00:00
|
|
|
word = pieces[1]
|
|
|
|
pos = pieces[3]
|
2015-01-28 17:21:13 +00:00
|
|
|
head_idx = int(pieces[6])
|
2015-01-09 17:53:26 +00:00
|
|
|
label = pieces[7]
|
2015-01-28 17:21:13 +00:00
|
|
|
return id_, word, pos, head_idx, label
|
2015-01-09 17:53:26 +00:00
|
|
|
|
2015-01-29 23:31:03 +00:00
|
|
|
|
2015-01-30 05:36:24 +00:00
|
|
|
# Cumulative count of non-punctuation gold tokens dropped because the
# predicted tokenization failed to produce a matching token.  Accumulates
# across calls; the running total is also returned from the aligner.
loss = 0


def _align_annotations_to_non_gold_tokens(tokens, words, annot):
    """Align gold annotations with a predicted (non-gold) tokenization.

    tokens: the predicted tokens, each exposing a character offset `.idx`.
    words: the gold word strings, consumed from the front in step with annot.
    annot: list of (char_offset, tag, head, label) tuples, sorted by offset;
        consumed destructively via pop(0).

    Returns (loss, tags, heads, labels), where the per-token lists hold None
    for predicted tokens that have no exactly-matching gold token, and loss
    is the cumulative global miss count.
    """
    global loss
    tags = []
    heads = []
    labels = []
    for token in tokens:
        # Discard gold entries that start before the current predicted token:
        # they can never be matched.  Missing punctuation is not penalized.
        while annot and token.idx > annot[0][0]:
            _, _, _, miss_label = annot.pop(0)
            words.pop(0)
            if not is_punct_label(miss_label):
                loss += 1
        if not annot:
            # Gold annotations exhausted; pad the remaining token with Nones.
            tags.append(None)
            heads.append(None)
            labels.append(None)
            continue
        id_, tag, head, label = annot[0]
        if token.idx == id_:
            # Exact character-offset match: take the gold annotation.
            tags.append(tag)
            heads.append(head)
            labels.append(label)
            annot.pop(0)
            words.pop(0)
        elif token.idx < id_:
            # Predicted token starts before the next gold token (split token).
            tags.append(None)
            heads.append(None)
            labels.append(None)
        else:
            # token.idx > id_ is impossible after the while-loop above.
            # (Was `raise StandardError`, which no longer exists on Python 3.)
            raise ValueError("Unaligned token at character offset %d" % token.idx)
    return loss, tags, heads, labels
|
|
|
|
|
|
|
|
|
|
|
|
def iter_data(paragraphs, tokenizer, gold_preproc=False):
    """Yield (tokens, tags, heads, labels) training tuples.

    paragraphs: tuples of (raw, tokenized, ids, words, tags, heads, labels).
    tokenizer: callable producing tokens from raw text; must also expose
        tokens_from_list() for the gold-tokenization path.
    gold_preproc: if True, trust the gold tokenization instead of
        re-tokenizing the raw text.
    """
    for raw, tokenized, ids, words, tags, heads, labels in paragraphs:
        if not gold_preproc:
            tokens = tokenizer(raw)
            # Materialize the zip: the alignment helper pops items off the
            # front, and zip() returns a non-poppable iterator on Python 3.
            loss, tags, heads, labels = _align_annotations_to_non_gold_tokens(
                tokens, list(words),
                list(zip(ids, tags, heads, labels)))
            ids = [t.idx for t in tokens]
            heads = _map_indices_to_tokens(ids, heads)
            yield tokens, tags, heads, labels
        else:
            assert len(words) == len(heads)
            # Consume a len(words)-sized prefix of the flat annotation lists
            # for each gold-tokenized sentence.
            for words in tokenized:
                sent_ids = ids[:len(words)]
                sent_tags = tags[:len(words)]
                sent_heads = heads[:len(words)]
                sent_labels = labels[:len(words)]
                sent_heads = _map_indices_to_tokens(sent_ids, sent_heads)
                tokens = tokenizer.tokens_from_list(words)
                yield tokens, sent_tags, sent_heads, sent_labels
                ids = ids[len(words):]
                tags = tags[len(words):]
                heads = heads[len(words):]
                labels = labels[len(words):]
|
|
|
|
|
|
|
|
|
2015-01-09 17:53:26 +00:00
|
|
|
def get_labels(sents):
    """Collect the dependency labels seen on left-arcs and right-arcs.

    A child with a later head contributes a left-arc label; a child with an
    earlier head contributes a right-arc label (self-heads contribute
    nothing).  Returns (left_labels, right_labels) as sorted lists.
    """
    left = set()
    right = set()
    for raw, tokenized, ids, words, tags, heads, labels in sents:
        for child, (head, label) in enumerate(zip(heads, labels)):
            if head > child:
                left.add(label)
            elif head < child:
                right.add(label)
    return sorted(left), sorted(right)
|
|
|
|
|
|
|
|
|
2015-03-09 05:46:53 +00:00
|
|
|
def train(Language, train_loc, model_dir, n_iter=15, feat_set=u'basic', seed=0,
          gold_preproc=False, force_gold=False, n_sents=0):
    """Train the POS tagger and dependency parser (NER wiring is stubbed out).

    Language: language class to instantiate (e.g. spacy.en.English).
    train_loc: path to a docparse-format training file.
    model_dir: output directory; 'deps', 'pos' and 'ner' subdirs are rebuilt.
    n_iter: number of passes over the training data.
    feat_set: parser feature-set name written into the parser config.
    seed: RNG seed recorded in the model configs.
    gold_preproc: if True, train on the gold tokenization.
    force_gold: passed through to the parser's training update.
    n_sents: if > 0, truncate the training data to this many sentences.
    """
    dep_model_dir = path.join(model_dir, 'deps')
    pos_model_dir = path.join(model_dir, 'pos')
    ner_model_dir = path.join(model_dir, 'ner')
    # Rebuild the model subdirectories from scratch so stale weights from a
    # previous run cannot leak into this one.
    if path.exists(dep_model_dir):
        shutil.rmtree(dep_model_dir)
    if path.exists(pos_model_dir):
        shutil.rmtree(pos_model_dir)
    if path.exists(ner_model_dir):
        shutil.rmtree(ner_model_dir)
    os.mkdir(dep_model_dir)
    os.mkdir(pos_model_dir)
    os.mkdir(ner_model_dir)

    setup_model_dir(sorted(POS_TAGS.keys()), POS_TAGS, POS_TEMPLATES, pos_model_dir)

    gold_tuples = read_docparse_file(train_loc)

    # Label inventories are computed from the FULL data set, before any
    # n_sents truncation, so the transition systems cover every label.
    Config.write(dep_model_dir, 'config', features=feat_set, seed=seed,
                 labels=Language.ParserTransitionSystem.get_labels(gold_tuples))
    Config.write(ner_model_dir, 'config', features='ner', seed=seed,
                 labels=Language.EntityTransitionSystem.get_labels(gold_tuples))

    if n_sents > 0:
        gold_tuples = gold_tuples[:n_sents]
    nlp = Language()

    # Single-argument print() parses and prints identically on Python 2 and
    # Python 3; the old `print x` statement form was Python-2-only.
    print("Itn.\tUAS\tNER F.\tTag %")
    for itn in range(n_iter):
        scorer = Scorer()
        for raw_text, segmented_text, annot_tuples in gold_tuples:
            if gold_preproc:
                sents = [nlp.tokenizer.tokens_from_list(s) for s in segmented_text]
            else:
                sents = [nlp.tokenizer(raw_text)]
            for tokens in sents:
                gold = GoldParse(tokens, annot_tuples)
                # Tag first: the parser trains against predicted tags.
                nlp.tagger(tokens)
                nlp.parser.train(tokens, gold, force_gold=force_gold)
                #nlp.entity.train(tokens, gold, force_gold=force_gold)
                nlp.tagger.train(tokens, gold.tags)

                #nlp.entity(tokens)
                # Re-parse with the updated model to measure this epoch.
                nlp.parser(tokens)
                scorer.score(tokens, gold, verbose=False)
        print('%d:\t%.3f\t%.3f\t%.3f' % (itn, scorer.uas, scorer.ents_f, scorer.tags_acc))
        # Reshuffle so each epoch sees the data in a different order.
        random.shuffle(gold_tuples)
    nlp.parser.model.end_training()
    nlp.entity.model.end_training()
    nlp.tagger.model.end_training()
|
|
|
|
|
|
|
|
|
2015-03-14 15:09:55 +00:00
|
|
|
def evaluate(Language, dev_loc, model_dir, gold_preproc=False, verbose=True):
    """Run the full pipeline over a docparse-format dev file and score it.

    Returns the populated Scorer.  Gold preprocessing is not supported here.
    """
    assert not gold_preproc
    nlp = Language()
    scorer = Scorer()
    for raw_text, segmented_text, annot_tuples in read_docparse_file(dev_loc):
        tokens = nlp(raw_text)
        scorer.score(tokens, GoldParse(tokens, annot_tuples), verbose=verbose)
    return scorer
|
2015-03-08 05:17:12 +00:00
|
|
|
|
|
|
|
|
2015-02-23 19:05:04 +00:00
|
|
|
@plac.annotations(
    train_loc=("Training file location",),
    dev_loc=("Dev. file location",),
    model_dir=("Location of output model directory",),
    n_sents=("Number of training sentences", "option", "n", int),
    verbose=("Verbose error reporting", "flag", "v", bool),
)
def main(train_loc, dev_loc, model_dir, n_sents=0, verbose=False):
    """Train on train_loc, then report tagging/parsing/NER scores on dev_loc."""
    train(English, train_loc, model_dir,
          gold_preproc=False, force_gold=False, n_sents=n_sents)
    scorer = evaluate(English, dev_loc, model_dir, gold_preproc=False, verbose=verbose)
    # %s-formatting keeps the output byte-identical to the old Python-2-only
    # two-argument print statements (`print 'POS', x`), while the single
    # argument call form is valid on both Python 2 and Python 3.
    print('POS %s' % scorer.tags_acc)
    print('UAS %s' % scorer.uas)
    print('LAS %s' % scorer.las)

    print('NER P %s' % scorer.ents_p)
    print('NER R %s' % scorer.ents_r)
    print('NER F %s' % scorer.ents_f)
|
2015-01-09 17:53:26 +00:00
|
|
|
|
|
|
|
|
|
|
|
# Command-line entry point: plac builds the argument parser from main()'s
# annotations and dispatches to it.
if __name__ == '__main__':
    plac.call(main)
|