2018-11-30 19:16:14 +00:00
|
|
|
# flake8: noqa
|
|
|
|
"""Train for CONLL 2017 UD treebank evaluation. Takes .conllu files, writes
|
2018-03-10 22:41:55 +00:00
|
|
|
.conllu format for development data, allowing the official scorer to be used.
|
2018-11-30 19:16:14 +00:00
|
|
|
"""
|
2018-03-10 22:41:55 +00:00
|
|
|
from __future__ import unicode_literals
|
2018-11-30 19:16:14 +00:00
|
|
|
|
2018-03-10 22:41:55 +00:00
|
|
|
import plac
|
|
|
|
from pathlib import Path
|
|
|
|
import re
|
|
|
|
import sys
|
|
|
|
import json
|
|
|
|
|
|
|
|
import spacy
|
|
|
|
import spacy.util
|
2019-03-20 00:19:34 +00:00
|
|
|
from spacy.tokens import Token, Doc
|
|
|
|
from spacy.gold import GoldParse
|
|
|
|
from spacy.util import compounding, minibatch, minibatch_by_words
|
|
|
|
from spacy.syntax.nonproj import projectivize
|
|
|
|
from spacy.matcher import Matcher
|
|
|
|
from spacy import displacy
|
2018-03-10 22:41:55 +00:00
|
|
|
from collections import defaultdict, Counter
|
|
|
|
from timeit import default_timer as timer
|
|
|
|
|
|
|
|
import itertools
|
|
|
|
import random
|
|
|
|
import numpy.random
|
|
|
|
|
|
|
|
from . import conll17_ud_eval
|
|
|
|
|
2019-03-20 00:19:34 +00:00
|
|
|
from spacy import lang
|
|
|
|
from spacy.lang import zh
|
|
|
|
from spacy.lang import ja
|
2018-03-10 22:41:55 +00:00
|
|
|
|
2018-09-13 22:54:59 +00:00
|
|
|
try:
|
|
|
|
import torch
|
|
|
|
except ImportError:
|
|
|
|
torch = None
|
|
|
|
|
2018-03-10 22:41:55 +00:00
|
|
|
|
|
|
|
################
|
|
|
|
# Data reading #
|
|
|
|
################
|
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
# Matches any run of whitespace; used to normalise spacing in raw text.
# Raw string avoids the invalid-escape-sequence DeprecationWarning for "\s".
space_re = re.compile(r"\s+")
|
2018-03-10 22:41:55 +00:00
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
|
|
|
|
def split_text(text):
    """Split raw text into paragraphs on blank lines, collapsing every
    internal whitespace run to a single space."""
    paragraphs = []
    for chunk in text.split("\n\n"):
        paragraphs.append(re.sub(r"\s+", " ", chunk.strip()))
    return paragraphs
|
|
|
|
|
|
|
|
|
|
|
|
def read_data(
    nlp,
    conllu_file,
    text_file,
    raw_text=True,
    oracle_segments=False,
    max_doc_length=None,
    limit=None,
):
    """Read the CONLLU format into (Doc, GoldParse) tuples. If raw_text=True,
    include Doc objects created using nlp.make_doc and then aligned against
    the gold-standard sequences. If oracle_segments=True, include Doc objects
    created from the gold-standard segments. At least one must be True.

    max_doc_length caps how many sentences are grouped into one raw-text Doc;
    limit caps the total number of (Doc, GoldParse) pairs returned.
    """
    if not raw_text and not oracle_segments:
        raise ValueError("At least one of raw_text or oracle_segments must be True")
    paragraphs = split_text(text_file.read())
    conllu = read_conllu(conllu_file)
    # sd is spacy doc; cd is conllu doc
    # cs is conllu sent, ct is conllu token
    docs = []
    golds = []
    for doc_id, (text, cd) in enumerate(zip(paragraphs, conllu)):
        sent_annots = []
        for cs in cd:
            sent = defaultdict(list)
            for id_, word, lemma, pos, tag, morph, head, dep, _, space_after in cs:
                if "." in id_:
                    # Skip empty nodes (enhanced-dependency IDs like "8.1").
                    continue
                if "-" in id_:
                    # Skip multi-word (fused) token range lines like "3-4".
                    continue
                id_ = int(id_) - 1  # CONLL-U token IDs are 1-based
                # CONLL-U head "0" means root; spaCy represents root as a
                # self-attachment.
                head = int(head) - 1 if head != "0" else id_
                sent["words"].append(word)
                sent["tags"].append(tag)
                sent["morphology"].append(_parse_morph_string(morph))
                sent["morphology"][-1].add("POS_%s" % pos)
                sent["heads"].append(head)
                sent["deps"].append("ROOT" if dep == "root" else dep)
                # NOTE(review): "_" in the final column is treated as "a space
                # follows this token" — confirm against the data preparation.
                sent["spaces"].append(space_after == "_")
            sent["entities"] = ["-"] * len(sent["words"])
            # Make the trees projective so the parser can learn them directly.
            sent["heads"], sent["deps"] = projectivize(sent["heads"], sent["deps"])
            if oracle_segments:
                # One Doc per gold sentence, built from the gold tokens.
                docs.append(Doc(nlp.vocab, words=sent["words"], spaces=sent["spaces"]))
                golds.append(GoldParse(docs[-1], **sent))
                assert golds[-1].morphology is not None

            sent_annots.append(sent)
            if raw_text and max_doc_length and len(sent_annots) >= max_doc_length:
                # Flush the accumulated sentences into one raw-text Doc.
                doc, gold = _make_gold(nlp, None, sent_annots)
                assert gold.morphology is not None
                sent_annots = []
                docs.append(doc)
                golds.append(gold)
                if limit and len(docs) >= limit:
                    return docs, golds

        if raw_text and sent_annots:
            # Flush any remaining sentences of this paragraph.
            doc, gold = _make_gold(nlp, None, sent_annots)
            docs.append(doc)
            golds.append(gold)
        if limit and len(docs) >= limit:
            return docs, golds
    return docs, golds
|
|
|
|
|
2018-09-25 19:32:24 +00:00
|
|
|
def _parse_morph_string(morph_string):
|
|
|
|
if morph_string == '_':
|
2018-09-26 19:02:42 +00:00
|
|
|
return set()
|
2018-09-25 19:32:24 +00:00
|
|
|
output = []
|
|
|
|
replacements = {'1': 'one', '2': 'two', '3': 'three'}
|
|
|
|
for feature in morph_string.split('|'):
|
|
|
|
key, value = feature.split('=')
|
|
|
|
value = replacements.get(value, value)
|
2018-09-26 19:02:42 +00:00
|
|
|
value = value.split(',')[0]
|
2018-09-25 19:32:24 +00:00
|
|
|
output.append('%s_%s' % (key, value.lower()))
|
|
|
|
return set(output)
|
2018-03-10 22:41:55 +00:00
|
|
|
|
|
|
|
def read_conllu(file_):
    """Parse a CONLL-U file (any iterable of lines) into a nested list:
    docs -> sentences -> token rows, where each row is the list of the ten
    tab-separated CONLL-U columns.

    Document boundaries come from '# newdoc' comment lines; other comment
    lines are skipped; a blank line terminates the current sentence.

    Raises ValueError (with the offending line in the message) if a token
    line does not have exactly 10 columns.
    """
    docs = []
    sent = []
    doc = []
    for line in file_:
        if line.startswith("# newdoc"):
            # New document: flush whatever we collected so far.
            if doc:
                docs.append(doc)
            doc = []
        elif line.startswith("#"):
            continue
        elif not line.strip():
            # Blank line ends the current sentence.
            if sent:
                doc.append(sent)
            sent = []
        else:
            fields = line.strip().split("\t")
            if len(fields) != 10:
                # Include the bad line in the error instead of printing it
                # and raising a bare ValueError.
                raise ValueError(
                    "Invalid CONLL-U line (expected 10 tab-separated "
                    "fields, got %d): %r" % (len(fields), line)
                )
            sent.append(fields)
    # Flush a trailing sentence/document not followed by a terminator.
    if sent:
        doc.append(sent)
    if doc:
        docs.append(doc)
    return docs
|
|
|
|
|
|
|
|
|
2018-05-07 13:52:47 +00:00
|
|
|
def _make_gold(nlp, text, sent_annots, drop_deps=0.0):
    """Merge per-sentence annotation dicts into one (Doc, GoldParse) pair.

    If text is None it is reconstructed from the words and spaces.
    drop_deps is the probability of masking each token's head/label
    (set to 0.0 by default, i.e. keep everything).
    """
    # Flatten the conll annotations, and adjust the head indices
    flat = defaultdict(list)
    sent_starts = []
    for sent in sent_annots:
        # Offset heads by the number of words already flattened so they
        # index into the merged document, not the individual sentence.
        flat["heads"].extend(len(flat["words"])+head for head in sent["heads"])
        for field in ["words", "tags", "deps", "morphology", "entities", "spaces"]:
            flat[field].extend(sent[field])
        # Mark only the first token of each sentence as a sentence start.
        sent_starts.append(True)
        sent_starts.extend([False] * (len(sent["words"]) - 1))
    # Construct text if necessary
    assert len(flat["words"]) == len(flat["spaces"])
    if text is None:
        text = "".join(
            word + " " * space for word, space in zip(flat["words"], flat["spaces"])
        )
    doc = nlp.make_doc(text)
    # Spaces were only needed to reconstruct the text; drop them before
    # passing the remaining fields to GoldParse.
    flat.pop("spaces")
    gold = GoldParse(doc, **flat)
    gold.sent_starts = sent_starts
    for i in range(len(gold.heads)):
        # Randomly drop dependencies to simulate partial annotation.
        if random.random() < drop_deps:
            gold.heads[i] = None
            gold.labels[i] = None

    return doc, gold
|
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
|
2018-03-10 22:41:55 +00:00
|
|
|
#############################
|
|
|
|
# Data transforms for spaCy #
|
|
|
|
#############################
|
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
|
2018-03-10 22:41:55 +00:00
|
|
|
def golds_to_gold_tuples(docs, golds):
    """Convert (Doc, GoldParse) pairs into the nested 'tuples' format that
    Language.begin_training expects."""
    out = []
    for doc, gold in zip(docs, golds):
        # Transpose the per-token rows into per-column tuples:
        # (ids, words, tags, heads, labels, iob).
        annot = tuple(zip(*gold.orig_annot))
        out.append((doc.text, [(annot, [])]))
    return out
|
|
|
|
|
|
|
|
|
|
|
|
##############
|
|
|
|
# Evaluation #
|
|
|
|
##############
|
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
|
2018-03-10 22:41:55 +00:00
|
|
|
def evaluate(nlp, text_loc, gold_loc, sys_loc, limit=None):
    """Parse the dev data, write the parses to sys_loc in CONLL-U format,
    and score them against gold_loc with the official CONLL 2017 scorer.

    If text_loc is itself a .conllu file, oracle segmentation is used: Docs
    are built from the gold tokens and only the pipeline components (not the
    tokenizer) are applied. Otherwise text_loc is read as raw text and
    parsed end-to-end.

    Returns (docs, scores) where scores is the dict produced by
    conll17_ud_eval.evaluate.

    NOTE(review): the `limit` parameter is accepted but unused.
    """
    if text_loc.parts[-1].endswith(".conllu"):
        # Oracle segments: one Doc per gold sentence, from gold tokens.
        docs = []
        with text_loc.open() as file_:
            for conllu_doc in read_conllu(file_):
                for conllu_sent in conllu_doc:
                    words = [line[1] for line in conllu_sent]
                    docs.append(Doc(nlp.vocab, words=words))
        # Apply the pipeline components directly, skipping the tokenizer.
        for name, component in nlp.pipeline:
            docs = list(component.pipe(docs))
    else:
        with text_loc.open("r", encoding="utf8") as text_file:
            texts = split_text(text_file.read())
        docs = list(nlp.pipe(texts))
    with sys_loc.open("w", encoding="utf8") as out_file:
        write_conllu(docs, out_file)
    with gold_loc.open("r", encoding="utf8") as gold_file:
        gold_ud = conll17_ud_eval.load_conllu(gold_file)
    with sys_loc.open("r", encoding="utf8") as sys_file:
        sys_ud = conll17_ud_eval.load_conllu(sys_file)
    scores = conll17_ud_eval.evaluate(gold_ud, sys_ud)
    return docs, scores
|
2018-03-10 22:41:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
def write_conllu(docs, file_):
    """Write parsed Docs to file_ in CONLL-U format, one block per doc.

    Tokens joined by 'subtok' dependencies are first merged back into single
    tokens so the output matches the original CONLL-U tokenization.

    Raises ValueError if any token's head lies outside its sentence (an
    invalid parse), after printing the surrounding context for debugging.
    """
    merger = Matcher(docs[0].vocab)
    # Match maximal runs of tokens attached with the 'subtok' relation.
    merger.add("SUBTOK", None, [{"DEP": "subtok", "op": "+"}])
    for i, doc in enumerate(docs):
        matches = merger(doc)
        spans = [doc[start : end + 1] for _, start, end in matches]
        seen_tokens = set()
        with doc.retokenize() as retokenizer:
            for span in spans:
                span_tokens = set(range(span.start, span.end))
                # Only merge spans that don't overlap one already merged.
                if not span_tokens.intersection(seen_tokens):
                    retokenizer.merge(span)
                    seen_tokens.update(span_tokens)

        file_.write("# newdoc id = {i}\n".format(i=i))
        for j, sent in enumerate(doc.sents):
            file_.write("# sent_id = {i}.{j}\n".format(i=i, j=j))
            file_.write("# text = {text}\n".format(text=sent.text))
            for k, token in enumerate(sent):
                if token.head.i > sent[-1].i or token.head.i < sent[0].i:
                    # Dump the tokens before, inside, and after the broken
                    # sentence before failing, to aid debugging.
                    for word in doc[sent[0].i - 10 : sent[0].i]:
                        print(word.i, word.head.i, word.text, word.dep_)
                    for word in sent:
                        print(word.i, word.head.i, word.text, word.dep_)
                    for word in doc[sent[-1].i : sent[-1].i + 10]:
                        print(word.i, word.head.i, word.text, word.dep_)
                    raise ValueError(
                        "Invalid parse: head outside sentence (%s)" % token.text
                    )
                file_.write(token._.get_conllu_lines(k) + "\n")
            file_.write("\n")
|
2018-03-10 22:41:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
def print_progress(itn, losses, ud_scores):
    """Print one tab-separated row of training metrics for epoch *itn*;
    on the first epoch a header row is printed first."""
    if itn == 0:
        header = ["Epoch", "P.Loss", "M.Loss", "LAS", "UAS", "TAG", "MORPH", "SENT", "WORD"]
        print("\t".join(header))
    metrics = {
        "dep_loss": losses.get("parser", 0.0),
        "morph_loss": losses.get("morphologizer", 0.0),
        "tag_loss": losses.get("tagger", 0.0),
        "words": ud_scores["Words"].f1 * 100,
        "sents": ud_scores["Sentences"].f1 * 100,
        "tags": ud_scores["XPOS"].f1 * 100,
        "uas": ud_scores["UAS"].f1 * 100,
        "las": ud_scores["LAS"].f1 * 100,
        "morph": ud_scores["Feats"].f1 * 100,
    }
    # Column order must match the header above.
    column_keys = ["dep_loss", "morph_loss", "las", "uas", "tags", "morph", "sents", "words"]
    cells = ["{:d}".format(itn)]
    cells.extend("{:.1f}".format(metrics[key]) for key in column_keys)
    print("\t".join(cells))
|
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
|
|
|
|
# def get_sent_conllu(sent, sent_id):
|
2018-03-10 22:41:55 +00:00
|
|
|
# lines = ["# sent_id = {sent_id}".format(sent_id=sent_id)]
|
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
|
2018-03-10 22:41:55 +00:00
|
|
|
def get_token_conllu(token, i):
    """Render one token (at 0-based sentence offset i) as CONLL-U line(s).

    For a token beginning a fused surface form, a multi-word '#-#' range row
    is emitted before the token's own row. Morphological features are taken
    from token.morph, skipping the internal begin*/end* markers and
    converting 'Key_value' entries back to 'Key=Value'.
    """
    if token._.begins_fused:
        # Count how many following tokens belong to the same fused form.
        n = 1
        while token.nbor(n)._.inside_fused:
            n += 1
        id_ = "%d-%d" % (i, i + n)
        # NOTE(review): the range uses i..i+n while the token row below uses
        # i+1 — confirm the intended ID base for multi-word rows.
        # BUG FIX: the range row must be ONE tab-separated CONLL-U line;
        # previously the ten fields were stored as separate lines and ended
        # up newline-joined in the output.
        fused_fields = [id_, token.text, "_", "_", "_", "_", "_", "_", "_", "_"]
        lines = ["\t".join(fused_fields)]
    else:
        lines = []
    if token.head.i == token.i:
        head = 0  # root token: CONLL-U head column is 0
    else:
        head = i + (token.head.i - token.i) + 1
    features = list(token.morph)
    feat_str = []
    replacements = {"one": "1", "two": "2", "three": "3"}
    for feat in features:
        if not feat.startswith("begin") and not feat.startswith("end"):
            key, value = feat.split("_", 1)
            value = replacements.get(value, value)
            feat_str.append("%s=%s" % (key, value.title()))
    if not feat_str:
        feat_str = "_"
    else:
        feat_str = "|".join(feat_str)
    fields = [str(i+1), token.text, token.lemma_, token.pos_, token.tag_, feat_str,
              str(head), token.dep_.lower(), "_", "_"]
    lines.append("\t".join(fields))
    return "\n".join(lines)
|
|
|
|
|
|
|
|
# Register custom Token attributes used by write_conllu: get_conllu_lines
# renders a token's CONLL-U row(s); begins_fused/inside_fused mark tokens
# that belong to a multi-word (fused) surface form.
Token.set_extension("get_conllu_lines", method=get_token_conllu)
Token.set_extension("begins_fused", default=False)
Token.set_extension("inside_fused", default=False)
|
2018-03-10 22:41:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
##################
|
|
|
|
# Initialization #
|
|
|
|
##################
|
|
|
|
|
|
|
|
|
2018-04-29 13:49:32 +00:00
|
|
|
def load_nlp(corpus, config, vectors=None):
    """Create a blank pipeline for the treebank's language, optionally
    loading pre-trained vectors from <vectors>/<corpus>/vocab.

    corpus: treebank name, e.g. "en_ewt"; the language code is the part
    before the first underscore.
    """
    lang = corpus.split("_")[0]
    nlp = spacy.blank(lang)
    if config.vectors:
        if not vectors:
            raise ValueError(
                "config asks for vectors, but no vectors "
                "directory set on command line (use -v)"
            )
        if (Path(vectors) / corpus).exists():
            # NOTE(review): when the per-corpus vectors directory is missing,
            # vectors are silently skipped — confirm that's intended.
            nlp.vocab.from_disk(Path(vectors) / corpus / "vocab")
            nlp.meta["treebank"] = corpus
    return nlp
|
2018-11-30 19:16:14 +00:00
|
|
|
|
2018-03-10 22:41:55 +00:00
|
|
|
|
2018-03-27 09:53:35 +00:00
|
|
|
def initialize_pipeline(nlp, docs, golds, config, device):
    """Add the tagger/morphologizer/parser components, register the gold
    tag labels, and call nlp.begin_training. Returns the optimizer.

    device: GPU id, or -1 for CPU.
    """
    nlp.add_pipe(nlp.create_pipe("tagger", config={"set_morphology": False}))
    nlp.add_pipe(nlp.create_pipe("morphologizer"))
    nlp.add_pipe(nlp.create_pipe("parser"))
    if config.multitask_tag:
        nlp.parser.add_multitask_objective("tag")
    if config.multitask_sent:
        nlp.parser.add_multitask_objective("sent_start")
    # Register every tag seen in the gold data with the tagger.
    for gold in golds:
        for tag in gold.tags:
            if tag is not None:
                nlp.tagger.add_label(tag)
    if torch is not None and device != -1:
        # Make new torch tensors default to the GPU when training on GPU.
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    optimizer = nlp.begin_training(
        # Lazy so begin_training can re-generate the tuples as needed.
        lambda: golds_to_gold_tuples(docs, golds),
        device=device,
        subword_features=config.subword_features,
        conv_depth=config.conv_depth,
        bilstm_depth=config.bilstm_depth,
    )
    if config.pretrained_tok2vec:
        _load_pretrained_tok2vec(nlp, config.pretrained_tok2vec)
    return optimizer
|
|
|
|
|
|
|
|
|
|
|
|
def _load_pretrained_tok2vec(nlp, loc):
    """Load pre-trained weights for the 'token-to-vector' part of the component
    models, which is typically a CNN. See 'spacy pretrain'. Experimental.

    Returns the list of pipe names the weights were loaded into.
    """
    with Path(loc).open("rb") as file_:
        weights_data = file_.read()
    loaded = []
    for name, component in nlp.pipeline:
        if hasattr(component, "model") and hasattr(component.model, "tok2vec"):
            # NOTE(review): the guard checks component.model.tok2vec but the
            # load goes through component.tok2vec — confirm these refer to
            # the same object for every pipe, otherwise this can
            # AttributeError at runtime.
            component.tok2vec.from_bytes(weights_data)
            loaded.append(name)
    return loaded
|
2018-03-10 22:41:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
########################
|
|
|
|
# Command line helpers #
|
|
|
|
########################
|
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
|
2018-03-10 22:41:55 +00:00
|
|
|
class Config(object):
    """Training hyper-parameters, constructed directly or loaded from a
    JSON file via Config.load().

    If vectors_dir is given, `vectors` and `multitask_vectors` default to
    True (pre-trained vectors are expected to be available there).
    """

    def __init__(
        self,
        vectors=None,
        max_doc_length=10,
        multitask_tag=False,
        multitask_sent=False,
        multitask_dep=False,
        multitask_vectors=None,
        bilstm_depth=0,
        nr_epoch=30,
        min_batch_size=100,
        max_batch_size=1000,
        batch_by_words=True,
        dropout=0.2,
        conv_depth=4,
        subword_features=True,
        vectors_dir=None,
        pretrained_tok2vec=None,
    ):
        if vectors_dir is not None:
            # A vectors directory implies using vectors (and the vectors
            # multitask setting) unless explicitly overridden.
            if vectors is None:
                vectors = True
            if multitask_vectors is None:
                multitask_vectors = True
        # Store every parameter as an attribute of the same name.
        for key, value in locals().items():
            # BUG FIX: locals() includes 'self'; skip it so we don't store
            # a useless self-reference as an attribute.
            if key != "self":
                setattr(self, key, value)

    @classmethod
    def load(cls, loc, vectors_dir=None):
        """Load a Config from the JSON file at `loc`; `vectors_dir`, if
        given, overrides any value from the file."""
        with Path(loc).open("r", encoding="utf8") as file_:
            cfg = json.load(file_)
        if vectors_dir is not None:
            cfg["vectors_dir"] = vectors_dir
        return cls(**cfg)
|
|
|
|
|
|
|
|
|
|
|
|
class Dataset(object):
    """Locate the .conllu and .txt files for one section ('train'/'dev')
    of a UD treebank directory, and derive the language code from the
    .conllu file name.

    Raises IOError if either file is missing.
    """

    def __init__(self, path, section):
        self.path = path
        self.section = section
        self.conllu = None
        self.text = None
        for file_path in self.path.iterdir():
            name = file_path.parts[-1]
            if section in name and name.endswith("conllu"):
                self.conllu = file_path
            elif section in name and name.endswith("txt"):
                self.text = file_path
        if self.conllu is None:
            # BUG FIX: message previously said ".txt" for a missing .conllu
            msg = "Could not find .conllu file in {path} for {section}"
            raise IOError(msg.format(section=section, path=path))
        if self.text is None:
            # BUG FIX: msg was assigned here but never raised, silently
            # leaving a half-initialised Dataset with text=None.
            msg = "Could not find .txt file in {path} for {section}"
            raise IOError(msg.format(section=section, path=path))
        # e.g. "en_ewt-ud-train.conllu" -> "en"
        self.lang = self.conllu.parts[-1].split("-")[0].split("_")[0]
|
2018-03-10 22:41:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
class TreebankPaths(object):
    """Bundle the train and dev Dataset objects for one UD treebank."""

    def __init__(self, ud_path, treebank, **cfg):
        corpus_dir = ud_path / treebank
        self.train = Dataset(corpus_dir, "train")
        self.dev = Dataset(corpus_dir, "dev")
        self.lang = self.train.lang
|
|
|
|
|
|
|
|
|
|
|
|
@plac.annotations(
    ud_dir=("Path to Universal Dependencies corpus", "positional", None, Path),
    corpus=(
        "UD corpus to train and evaluate on, e.g. en, es_ancora, etc",
        "positional",
        None,
        str,
    ),
    parses_dir=("Directory to write the development parses", "positional", None, Path),
    config=("Path to json formatted config file", "option", "C", Path),
    limit=("Size limit", "option", "n", int),
    gpu_device=("Use GPU", "option", "g", int),
    use_oracle_segments=("Use oracle segments", "flag", "G", int),
    vectors_dir=(
        "Path to directory with pre-trained vectors, named e.g. en/",
        "option",
        "v",
        Path,
    ),
)
def main(
    ud_dir,
    parses_dir,
    corpus,
    config=None,
    limit=0,
    gpu_device=-1,
    vectors_dir=None,
    use_oracle_segments=False,
):
    """Train on one UD treebank for config.nr_epoch epochs, writing the dev
    parses for each epoch to <parses_dir>/<corpus>/epoch-<i>.conllu and
    printing the official CONLL 2017 scores after every epoch."""
    # temp fix to avoid import issues cf https://github.com/explosion/spaCy/issues/4200
    import tqdm

    spacy.util.fix_random_seed()
    # Disable external tokenizers so zh/ja use the simple defaults.
    lang.zh.Chinese.Defaults.use_jieba = False
    lang.ja.Japanese.Defaults.use_janome = False

    if config is not None:
        config = Config.load(config, vectors_dir=vectors_dir)
    else:
        config = Config(vectors_dir=vectors_dir)
    paths = TreebankPaths(ud_dir, corpus)
    if not (parses_dir / corpus).exists():
        (parses_dir / corpus).mkdir()
    print("Train and evaluate", corpus, "using lang", paths.lang)
    nlp = load_nlp(paths.lang, config, vectors=vectors_dir)

    # Initial read only gathers the gold data needed for begin_training.
    docs, golds = read_data(
        nlp,
        paths.train.conllu.open(),
        paths.train.text.open(),
        max_doc_length=config.max_doc_length,
        limit=limit,
    )

    optimizer = initialize_pipeline(nlp, docs, golds, config, gpu_device)

    batch_sizes = compounding(config.min_batch_size, config.max_batch_size, 1.001)
    # Probability of using beam updates for the parser, annealed upwards.
    beam_prob = compounding(0.2, 0.8, 1.001)
    for i in range(config.nr_epoch):
        # Re-read each epoch so segmentation mode can differ from the
        # initial read used for begin_training.
        docs, golds = read_data(
            nlp,
            paths.train.conllu.open(),
            paths.train.text.open(),
            max_doc_length=config.max_doc_length,
            limit=limit,
            oracle_segments=use_oracle_segments,
            raw_text=not use_oracle_segments,
        )
        Xs = list(zip(docs, golds))
        random.shuffle(Xs)
        if config.batch_by_words:
            batches = minibatch_by_words(Xs, size=batch_sizes)
        else:
            batches = minibatch(Xs, size=batch_sizes)
        losses = {}
        n_train_words = sum(len(doc) for doc in docs)
        with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
            for batch in batches:
                batch_docs, batch_gold = zip(*batch)
                pbar.update(sum(len(doc) for doc in batch_docs))
                nlp.parser.cfg["beam_update_prob"] = next(beam_prob)
                nlp.update(
                    batch_docs,
                    batch_gold,
                    sgd=optimizer,
                    drop=config.dropout,
                    losses=losses,
                )

        out_path = parses_dir / corpus / "epoch-{i}.conllu".format(i=i)
        # Evaluate with the averaged parameters.
        with nlp.use_params(optimizer.averages):
            if use_oracle_segments:
                parsed_docs, scores = evaluate(nlp, paths.dev.conllu,
                                               paths.dev.conllu, out_path)
            else:
                parsed_docs, scores = evaluate(nlp, paths.dev.text,
                                               paths.dev.conllu, out_path)
        print_progress(i, losses, scores)
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 01:58:08 +00:00
|
|
|
|
|
|
|
|
|
|
|
def _render_parses(i, to_render):
|
2018-11-30 19:16:14 +00:00
|
|
|
to_render[0].user_data["title"] = "Batch %d" % i
|
|
|
|
with Path("/tmp/parses.html").open("w") as file_:
|
|
|
|
html = displacy.render(to_render[:5], style="dep", page=True)
|
Improve label management in parser and NER (#2108)
This patch does a few smallish things that tighten up the training workflow a little, and allow memory use during training to be reduced by letting the GoldCorpus stream data properly.
Previously, the parser and entity recognizer read and saved labels as lists, with extra labels noted separately. Lists were used becaue ordering is very important, to ensure that the label-to-class mapping is stable.
We now manage labels as nested dictionaries, first keyed by the action, and then keyed by the label. Values are frequencies. The trick is, how do we save new labels? We need to make sure we iterate over these in the same order they're added. Otherwise, we'll get different class IDs, and the model's predictions won't make sense.
To allow stable sorting, we map the new labels to negative values. If we have two new labels, they'll be noted as having "frequency" -1 and -2. The next new label will then have "frequency" -3. When we sort by (frequency, label), we then get a stable sort.
Storing frequencies then allows us to make the next nice improvement. Previously we had to iterate over the whole training set, to pre-process it for the deprojectivisation. This led to storing the whole training set in memory. This was most of the required memory during training.
To prevent this, we now store the frequencies as we stream in the data, and deprojectivize as we go. Once we've built the frequencies, we can then apply a frequency cut-off when we decide how many classes to make.
Finally, to allow proper data streaming, we also have to have some way of shuffling the iterator. This is awkward if the training files have multiple documents in them. To solve this, the GoldCorpus class now writes the training data to disk in msgpack files, one per document. We can then shuffle the data by shuffling the paths.
This is a squash merge, as I made a lot of very small commits. Individual commit messages below.
* Simplify label management for TransitionSystem and its subclasses
* Fix serialization for new label handling format in parser
* Simplify and improve GoldCorpus class. Reduce memory use, write to temp dir
* Set actions in transition system
* Require thinc 6.11.1.dev4
* Fix error in parser init
* Add unicode declaration
* Fix unicode declaration
* Update textcat test
* Try to get model training on less memory
* Print json loc for now
* Try rapidjson to reduce memory use
* Remove rapidjson requirement
* Try rapidjson for reduced mem usage
* Handle None heads when projectivising
* Stream json docs
* Fix train script
* Handle projectivity in GoldParse
* Fix projectivity handling
* Add minibatch_by_words util from ud_train
* Minibatch by number of words in spacy.cli.train
* Move minibatch_by_words util to spacy.util
* Fix label handling
* More hacking at label management in parser
* Fix encoding in msgpack serialization in GoldParse
* Adjust batch sizes in parser training
* Fix minibatch_by_words
* Add merge_subtokens function to pipeline.pyx
* Register merge_subtokens factory
* Restore use of msgpack tmp directory
* Use minibatch-by-words in train
* Handle retokenization in scorer
* Change back-off approach for missing labels. Use 'dep' label
* Update NER for new label management
* Set NER tags for over-segmented words
* Fix label alignment in gold
* Fix label back-off for infrequent labels
* Fix int type in labels dict key
* Fix int type in labels dict key
* Update feature definition for 8 feature set
* Update ud-train script for new label stuff
* Fix json streamer
* Print the line number if conll eval fails
* Update children and sentence boundaries after deprojectivisation
* Export set_children_from_heads from doc.pxd
* Render parses during UD training
* Remove print statement
* Require thinc 6.11.1.dev6. Try adding wheel as install_requires
* Set different dev version, to flush pip cache
* Update thinc version
* Update GoldCorpus docs
* Remove print statements
* Fix formatting and links [ci skip]
2018-03-19 01:58:08 +00:00
|
|
|
file_.write(html)
|
2018-03-10 22:41:55 +00:00
|
|
|
|
|
|
|
|
2018-11-30 19:16:14 +00:00
|
|
|
if __name__ == "__main__":
|
2018-03-10 22:41:55 +00:00
|
|
|
plac.call(main)
|