# coding: utf8
from __future__ import absolute_import, unicode_literals

import random
import ujson
import itertools
import weakref
import functools
from collections import OrderedDict
from contextlib import contextmanager
from copy import copy
from thinc.neural import Model
from thinc.neural.optimizers import Adam

from .tokenizer import Tokenizer
from .vocab import Vocab
from .lemmatizer import Lemmatizer
from .pipeline import DependencyParser, Tensorizer, Tagger, EntityRecognizer
from .pipeline import SimilarityHook, TextCategorizer, SentenceSegmenter
from .pipeline import merge_noun_chunks, merge_entities, merge_subtokens
from .compat import json_dumps, izip, basestring_
from .gold import GoldParse
from .scorer import Scorer
from ._ml import link_vectors_to_models, create_default_optimizer
from .attrs import IS_STOP
from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .lang.punctuation import TOKENIZER_INFIXES
from .lang.tokenizer_exceptions import TOKEN_MATCH
from .lang.tag_map import TAG_MAP
from .lang.lex_attrs import LEX_ATTRS, is_stop
from . import util
from . import about


class BaseDefaults(object):
    @classmethod
    def create_lemmatizer(cls, nlp=None):
        return Lemmatizer(cls.lemma_index, cls.lemma_exc, cls.lemma_rules,
                          cls.lemma_lookup)

    @classmethod
    def create_vocab(cls, nlp=None):
        lemmatizer = cls.create_lemmatizer(nlp)
        lex_attr_getters = dict(cls.lex_attr_getters)
        # This is messy, but it's the minimal working fix to Issue #639.
        lex_attr_getters[IS_STOP] = functools.partial(is_stop,
                                                      stops=cls.stop_words)
        vocab = Vocab(lex_attr_getters=lex_attr_getters, tag_map=cls.tag_map,
                      lemmatizer=lemmatizer)
        for tag_str, exc in cls.morph_rules.items():
            for orth_str, attrs in exc.items():
                vocab.morphology.add_special_case(tag_str, orth_str, attrs)
        return vocab

    @classmethod
    def create_tokenizer(cls, nlp=None):
        rules = cls.tokenizer_exceptions
        token_match = cls.token_match
        prefix_search = (util.compile_prefix_regex(cls.prefixes).search
                         if cls.prefixes else None)
        suffix_search = (util.compile_suffix_regex(cls.suffixes).search
                         if cls.suffixes else None)
        infix_finditer = (util.compile_infix_regex(cls.infixes).finditer
                          if cls.infixes else None)
        vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
        return Tokenizer(vocab, rules=rules,
                         prefix_search=prefix_search,
                         suffix_search=suffix_search,
                         infix_finditer=infix_finditer,
                         token_match=token_match)
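
    # Note: the token_match / prefixes / suffixes / infixes attributes below
    # hold the raw pattern data; create_tokenizer() above compiles the
    # prefix/suffix/infix data into regex callables and passes token_match
    # through to the Tokenizer, so language subclasses only need to override
    # the data, not the construction logic.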

    pipe_names = ['tagger', 'parser', 'ner']
    token_match = TOKEN_MATCH
    prefixes = tuple(TOKENIZER_PREFIXES)
    suffixes = tuple(TOKENIZER_SUFFIXES)
    infixes = tuple(TOKENIZER_INFIXES)
    tag_map = dict(TAG_MAP)
    tokenizer_exceptions = {}
    stop_words = set()
    lemma_rules = {}
    lemma_exc = {}
    lemma_index = {}
    lemma_lookup = {}
    morph_rules = {}
    lex_attr_getters = LEX_ATTRS
    syntax_iterators = {}


class Language(object):
    """A text-processing pipeline. Usually you'll load this once per process,
    and pass the instance around your application.

    Defaults (class): Settings, data and factory methods for creating the `nlp`
        object and processing pipeline.
    lang (unicode): Two-letter language ID, i.e. ISO code.
    """
    Defaults = BaseDefaults
    lang = None

    factories = {
        'tokenizer': lambda nlp: nlp.Defaults.create_tokenizer(nlp),
        'tensorizer': lambda nlp, **cfg: Tensorizer(nlp.vocab, **cfg),
        'tagger': lambda nlp, **cfg: Tagger(nlp.vocab, **cfg),
        'parser': lambda nlp, **cfg: DependencyParser(nlp.vocab, **cfg),
        'ner': lambda nlp, **cfg: EntityRecognizer(nlp.vocab, **cfg),
        'similarity': lambda nlp, **cfg: SimilarityHook(nlp.vocab, **cfg),
        'textcat': lambda nlp, **cfg: TextCategorizer(nlp.vocab, **cfg),
        'sbd': lambda nlp, **cfg: SentenceSegmenter(nlp.vocab, **cfg),
        'sentencizer': lambda nlp, **cfg: SentenceSegmenter(nlp.vocab, **cfg),
        'merge_noun_chunks': lambda nlp, **cfg: merge_noun_chunks,
        'merge_entities': lambda nlp, **cfg: merge_entities,
        'merge_subtokens': lambda nlp, **cfg: merge_subtokens,
    }
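
    # Sketch of how the factory table above is typically used, assuming a
    # plain Language instance (the name passed to create_pipe() must be one
    # of the keys in `factories`):
    #
    #     nlp = Language()
    #     ner = nlp.create_pipe('ner')
    #     nlp.add_pipe(ner, last=True)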

    def __init__(self, vocab=True, make_doc=True, meta={}, **kwargs):
        """Initialise a Language object.

        vocab (Vocab): A `Vocab` object. If `True`, a vocab is created via
            `Language.Defaults.create_vocab`.
        make_doc (callable): A function that takes text and returns a `Doc`
            object. Usually a `Tokenizer`.
        pipeline (list): A list of annotation processes or IDs of annotation
            processes, e.g. a `Tagger` object, or `'tagger'`. IDs are looked
            up in `Language.Defaults.factories`.
        disable (list): A list of component names to exclude from the pipeline.
            The disable list has priority over the pipeline list -- if the same
            string occurs in both, the component is not loaded.
        meta (dict): Custom meta data for the Language class. Is written to by
            models to add model meta data.
        RETURNS (Language): The newly constructed object.
        """
        self._meta = dict(meta)
        self._path = None
        if vocab is True:
            factory = self.Defaults.create_vocab
            vocab = factory(self, **meta.get('vocab', {}))
        self.vocab = vocab
        if make_doc is True:
            factory = self.Defaults.create_tokenizer
            make_doc = factory(self, **meta.get('tokenizer', {}))
        self.tokenizer = make_doc
        self.pipeline = []
        self._optimizer = None

    @property
    def path(self):
        return self._path

    @property
    def meta(self):
        self._meta.setdefault('lang', self.vocab.lang)
        self._meta.setdefault('name', 'model')
        self._meta.setdefault('version', '0.0.0')
        self._meta.setdefault('spacy_version', '>={}'.format(about.__version__))
        self._meta.setdefault('description', '')
        self._meta.setdefault('author', '')
        self._meta.setdefault('email', '')
        self._meta.setdefault('url', '')
        self._meta.setdefault('license', '')
        self._meta['vectors'] = {'width': self.vocab.vectors_length,
                                 'vectors': len(self.vocab.vectors),
                                 'keys': self.vocab.vectors.n_keys}
        self._meta['pipeline'] = self.pipe_names
        return self._meta

    @meta.setter
    def meta(self, value):
        self._meta = value

    # Conveniences to access pipeline components
    @property
    def tensorizer(self):
        return self.get_pipe('tensorizer')

    @property
    def tagger(self):
        return self.get_pipe('tagger')

    @property
    def parser(self):
        return self.get_pipe('parser')

    @property
    def entity(self):
        return self.get_pipe('ner')

    @property
    def matcher(self):
        return self.get_pipe('matcher')

    @property
    def pipe_names(self):
        """Get names of available pipeline components.

        RETURNS (list): List of component name strings, in order.
        """
        return [pipe_name for pipe_name, _ in self.pipeline]

    def get_pipe(self, name):
        """Get a pipeline component for a given component name.

        name (unicode): Name of pipeline component to get.
        RETURNS (callable): The pipeline component.
        """
        for pipe_name, component in self.pipeline:
            if pipe_name == name:
                return component
        msg = "No component '{}' found in pipeline. Available names: {}"
        raise KeyError(msg.format(name, self.pipe_names))

    def create_pipe(self, name, config=dict()):
        """Create a pipeline component from a factory.

        name (unicode): Factory name to look up in `Language.factories`.
        config (dict): Configuration parameters to initialise component.
        RETURNS (callable): Pipeline component.
        """
        if name not in self.factories:
            raise KeyError("Can't find factory for '{}'.".format(name))
        factory = self.factories[name]
        return factory(self, **config)

    def add_pipe(self, component, name=None, before=None, after=None,
                 first=None, last=None):
        """Add a component to the processing pipeline. Valid components are
        callables that take a `Doc` object, modify it and return it. Only one
        of before/after/first/last can be set. Default behaviour is "last".

        component (callable): The pipeline component.
        name (unicode): Name of pipeline component. Overwrites existing
            component.name attribute if available. If no name is set and
            the component exposes no name attribute, component.__name__ is
            used. An error is raised if a name already exists in the pipeline.
        before (unicode): Component name to insert component directly before.
        after (unicode): Component name to insert component directly after.
        first (bool): Insert component first / not first in the pipeline.
        last (bool): Insert component last / not last in the pipeline.

        EXAMPLE:
            >>> nlp.add_pipe(component, before='ner')
            >>> nlp.add_pipe(component, name='custom_name', last=True)
        """
        if not hasattr(component, '__call__'):
            msg = ("Not a valid pipeline component. Expected callable, but "
                   "got {}. ".format(repr(component)))
            if isinstance(component, basestring_) and component in self.factories:
                msg += ("If you meant to add a built-in component, use "
                        "create_pipe: nlp.add_pipe(nlp.create_pipe('{}'))"
                        .format(component))
            raise ValueError(msg)
        if name is None:
            if hasattr(component, 'name'):
                name = component.name
            elif hasattr(component, '__name__'):
                name = component.__name__
            elif (hasattr(component, '__class__') and
                  hasattr(component.__class__, '__name__')):
                name = component.__class__.__name__
            else:
                name = repr(component)
        if name in self.pipe_names:
            raise ValueError("'{}' already exists in pipeline.".format(name))
        if sum([bool(before), bool(after), bool(first), bool(last)]) >= 2:
            msg = ("Invalid constraints. You can only set one of the "
                   "following: before, after, first, last.")
            raise ValueError(msg)
        pipe = (name, component)
        if last or not any([first, before, after]):
            self.pipeline.append(pipe)
        elif first:
            self.pipeline.insert(0, pipe)
        elif before and before in self.pipe_names:
            self.pipeline.insert(self.pipe_names.index(before), pipe)
        elif after and after in self.pipe_names:
            self.pipeline.insert(self.pipe_names.index(after) + 1, pipe)
        else:
            msg = "Can't find '{}' in pipeline. Available names: {}"
            unfound = before or after
            raise ValueError(msg.format(unfound, self.pipe_names))

    def has_pipe(self, name):
        """Check if a component name is present in the pipeline. Equivalent to
        `name in nlp.pipe_names`.

        name (unicode): Name of the component.
        RETURNS (bool): Whether a component of the name exists in the pipeline.
        """
        return name in self.pipe_names

    def replace_pipe(self, name, component):
        """Replace a component in the pipeline.

        name (unicode): Name of the component to replace.
        component (callable): Pipeline component.
        """
        if name not in self.pipe_names:
            msg = "Can't find '{}' in pipeline. Available names: {}"
            raise ValueError(msg.format(name, self.pipe_names))
        self.pipeline[self.pipe_names.index(name)] = (name, component)

    def rename_pipe(self, old_name, new_name):
        """Rename a pipeline component.

        old_name (unicode): Name of the component to rename.
        new_name (unicode): New name of the component.
        """
        if old_name not in self.pipe_names:
            msg = "Can't find '{}' in pipeline. Available names: {}"
            raise ValueError(msg.format(old_name, self.pipe_names))
        if new_name in self.pipe_names:
            msg = "'{}' already exists in pipeline. Existing names: {}"
            raise ValueError(msg.format(new_name, self.pipe_names))
        i = self.pipe_names.index(old_name)
        self.pipeline[i] = (new_name, self.pipeline[i][1])

    def remove_pipe(self, name):
        """Remove a component from the pipeline.

        name (unicode): Name of the component to remove.
        RETURNS (tuple): A `(name, component)` tuple of the removed component.
        """
        if name not in self.pipe_names:
            msg = "Can't find '{}' in pipeline. Available names: {}"
            raise ValueError(msg.format(name, self.pipe_names))
        return self.pipeline.pop(self.pipe_names.index(name))

    def __call__(self, text, disable=[]):
        """Apply the pipeline to some text. The text can span multiple sentences,
        and can contain arbitrary whitespace. Alignment into the original string
        is preserved.

        text (unicode): The text to be processed.
        disable (list): Names of the pipeline components to disable.
        RETURNS (Doc): A container for accessing the annotations.

        EXAMPLE:
            >>> tokens = nlp('An example sentence. Another example sentence.')
            >>> tokens[0].text, tokens[0].head.tag_
            ('An', 'NN')
        """
        doc = self.make_doc(text)
        for name, proc in self.pipeline:
            if name in disable:
                continue
            doc = proc(doc)
        return doc

    def disable_pipes(self, *names):
        """Disable one or more pipeline components. If used as a context
        manager, the pipeline will be restored to the initial state at the end
        of the block. Otherwise, a DisabledPipes object is returned that has
        a `.restore()` method you can use to undo your changes.

        EXAMPLE:
            >>> nlp.add_pipe(nlp.create_pipe('parser'))
            >>> nlp.add_pipe(nlp.create_pipe('tagger'))
            >>> with nlp.disable_pipes('parser', 'tagger'):
            >>>     assert not nlp.has_pipe('parser')
            >>> assert nlp.has_pipe('parser')
            >>> disabled = nlp.disable_pipes('parser')
            >>> assert len(disabled) == 1
            >>> assert not nlp.has_pipe('parser')
            >>> disabled.restore()
            >>> assert nlp.has_pipe('parser')
        """
        return DisabledPipes(self, *names)

    def make_doc(self, text):
        return self.tokenizer(text)

    def update(self, docs, golds, drop=0., sgd=None, losses=None):
        """Update the models in the pipeline.

        docs (iterable): A batch of `Doc` objects.
        golds (iterable): A batch of `GoldParse` objects.
        drop (float): The dropout rate.
        sgd (callable): An optimizer.
        RETURNS (dict): Results from the update.

        EXAMPLE:
            >>> optimizer = nlp.begin_training(get_gold_tuples)
            >>> for docs, golds in batches:
            >>>     nlp.update(docs, golds, sgd=optimizer)
        """
        if len(docs) != len(golds):
            raise IndexError("Update expects same number of docs and golds. "
                             "Got: %d, %d" % (len(docs), len(golds)))
        if len(docs) == 0:
            return
        if sgd is None:
            if self._optimizer is None:
                self._optimizer = create_default_optimizer(Model.ops)
            sgd = self._optimizer

        # Allow dict of args to GoldParse, instead of GoldParse objects.
        gold_objs = []
        doc_objs = []
        for doc, gold in zip(docs, golds):
            if isinstance(doc, basestring_):
                doc = self.make_doc(doc)
            if not isinstance(gold, GoldParse):
                gold = GoldParse(doc, **gold)
            doc_objs.append(doc)
            gold_objs.append(gold)
        golds = gold_objs
        docs = doc_objs
        grads = {}

        def get_grads(W, dW, key=None):
            grads[key] = (W, dW)

        pipes = list(self.pipeline)
        random.shuffle(pipes)
        for name, proc in pipes:
            if not hasattr(proc, 'update'):
                continue
            grads = {}
            proc.update(docs, golds, drop=drop, sgd=get_grads, losses=losses)
            for key, (W, dW) in grads.items():
                sgd(W, dW, key=key)
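
    # A minimal training-loop sketch tying together begin_training(), update(),
    # use_params() and to_disk() (assuming `train_batches` yields (docs, golds)
    # pairs; that helper is illustrative and not part of this module):
    #
    #     optimizer = nlp.begin_training(get_gold_tuples)
    #     for docs, golds in train_batches:
    #         losses = {}
    #         nlp.update(docs, golds, sgd=optimizer, losses=losses)
    #     with nlp.use_params(optimizer.averages):
    #         nlp.to_disk('/tmp/checkpoint')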

    def preprocess_gold(self, docs_golds):
        """Can be called before training to pre-process gold data. By default,
        it handles nonprojectivity and adds missing tags to the tag map.

        docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
        YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
        """
        for name, proc in self.pipeline:
            if hasattr(proc, 'preprocess_gold'):
                docs_golds = proc.preprocess_gold(docs_golds)
        for doc, gold in docs_golds:
            yield doc, gold

    def begin_training(self, get_gold_tuples=None, sgd=None, **cfg):
        """Allocate models, pre-process training data and acquire an
        optimizer.

        get_gold_tuples (function): Function returning gold data.
        **cfg: Config parameters.
        RETURNS: An optimizer.
        """
        if get_gold_tuples is None:
            get_gold_tuples = lambda: []
        else:
            # Populate vocab
            for _, annots_brackets in get_gold_tuples():
                for annots, _ in annots_brackets:
                    for word in annots[1]:
                        _ = self.vocab[word]
        contexts = []
        if cfg.get('device', -1) >= 0:
            device = util.use_gpu(cfg['device'])
            if self.vocab.vectors.data.shape[1] >= 1:
                self.vocab.vectors.data = Model.ops.asarray(
                    self.vocab.vectors.data)
        else:
            device = None
        link_vectors_to_models(self.vocab)
        if sgd is None:
            sgd = create_default_optimizer(Model.ops)
        self._optimizer = sgd
        for name, proc in self.pipeline:
            if hasattr(proc, 'begin_training'):
                proc.begin_training(get_gold_tuples(),
                                    pipeline=self.pipeline,
                                    sgd=self._optimizer,
                                    **cfg)
        return self._optimizer

    def evaluate(self, docs_golds, verbose=False):
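        """Evaluate a batch of `(Doc, GoldParse)` tuples with the current
        pipeline and return the scorer.

        docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
        verbose (bool): Print each document before scoring it.
        RETURNS (Scorer): The scorer with the accumulated evaluation scores.
        """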
        scorer = Scorer()
        docs, golds = zip(*docs_golds)
        docs = list(docs)
        golds = list(golds)
        for name, pipe in self.pipeline:
            if not hasattr(pipe, 'pipe'):
                docs = (pipe(doc) for doc in docs)
            else:
                docs = pipe.pipe(docs, batch_size=256)
        for doc, gold in zip(docs, golds):
            if verbose:
                print(doc)
            scorer.score(doc, gold, verbose=verbose)
        return scorer

    @contextmanager
    def use_params(self, params, **cfg):
        """Replace weights of models in the pipeline with those provided in the
        params dictionary. Can be used as a contextmanager, in which case
        models go back to their original weights after the block.

        params (dict): A dictionary of parameters keyed by model ID.
        **cfg: Config parameters.

        EXAMPLE:
            >>> with nlp.use_params(optimizer.averages):
            >>>     nlp.to_disk('/tmp/checkpoint')
        """
        contexts = [pipe.use_params(params) for name, pipe
                    in self.pipeline if hasattr(pipe, 'use_params')]
        # TODO: Having trouble with contextlib
        # Workaround: these aren't actually context managers atm.
        for context in contexts:
            try:
                next(context)
            except StopIteration:
                pass
        yield
        for context in contexts:
            try:
                next(context)
            except StopIteration:
                pass

    def pipe(self, texts, as_tuples=False, n_threads=2, batch_size=1000,
             disable=[], cleanup=False):
        """Process texts as a stream, and yield `Doc` objects in order.

        texts (iterator): A sequence of texts to process.
        as_tuples (bool): If set to True, inputs should be a sequence of
            (text, context) tuples. Output will then be a sequence of
            (doc, context) tuples. Defaults to False.
        n_threads (int): Currently inactive.
        batch_size (int): The number of texts to buffer.
        disable (list): Names of the pipeline components to disable.
        cleanup (bool): If True, unneeded strings are freed,
            to control memory use. Experimental.
        YIELDS (Doc): Documents in the order of the original text.

        EXAMPLE:
            >>> texts = [u'One document.', u'...', u'Lots of documents']
            >>> for doc in nlp.pipe(texts, batch_size=50, n_threads=4):
            >>>     assert doc.is_parsed
        """
        if as_tuples:
            text_context1, text_context2 = itertools.tee(texts)
            texts = (tc[0] for tc in text_context1)
            contexts = (tc[1] for tc in text_context2)
            docs = self.pipe(texts, n_threads=n_threads, batch_size=batch_size,
                             disable=disable)
            for doc, context in izip(docs, contexts):
                yield (doc, context)
            return
        docs = (self.make_doc(text) for text in texts)
        for name, proc in self.pipeline:
            if name in disable:
                continue
            if hasattr(proc, 'pipe'):
                docs = proc.pipe(docs, n_threads=n_threads,
                                 batch_size=batch_size)
            else:
                # Apply the function, but yield the doc
                docs = _pipe(proc, docs)
        # Track weakrefs of "recent" documents, so that we can see when they
        # expire from memory. When they do, we know we don't need old strings.
        # This way, we avoid maintaining an unbounded growth in string entries
        # in the string store.
        recent_refs = weakref.WeakSet()
        old_refs = weakref.WeakSet()
        # Keep track of the original string data, so that if we flush old
        # strings, we can recover the original ones. However, we only want to
        # do this if we're really adding strings, to save up-front costs.
        original_strings_data = None
        nr_seen = 0
        for doc in docs:
            yield doc
            if cleanup:
                recent_refs.add(doc)
                if nr_seen < 10000:
                    old_refs.add(doc)
                    nr_seen += 1
                elif len(old_refs) == 0:
                    old_refs, recent_refs = recent_refs, old_refs
                    if original_strings_data is None:
                        original_strings_data = list(self.vocab.strings)
                    else:
                        keys, strings = self.vocab.strings._cleanup_stale_strings(
                            original_strings_data)
                        self.vocab._reset_cache(keys, strings)
                        self.tokenizer._reset_cache(keys)
                    nr_seen = 0
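
    # Sketch of the `as_tuples` mode described in the docstring above (the
    # example data is illustrative, not part of this module):
    #
    #     data = [('A first text.', {'id': 1}), ('A second text.', {'id': 2})]
    #     for doc, context in nlp.pipe(data, as_tuples=True):
    #         print(doc.text, context['id'])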

    def to_disk(self, path, disable=tuple()):
        """Save the current state to a directory. If a model is loaded, this
        will include the model.

        path (unicode or Path): A path to a directory, which will be created if
            it doesn't exist. Paths may be strings or `Path`-like objects.
        disable (list): Names of pipeline components to disable and prevent
            from being saved.

        EXAMPLE:
            >>> nlp.to_disk('/path/to/models')
        """
        path = util.ensure_path(path)
        serializers = OrderedDict((
            ('tokenizer', lambda p: self.tokenizer.to_disk(p, vocab=False)),
            ('meta.json', lambda p: p.open('w').write(json_dumps(self.meta)))
        ))
        for name, proc in self.pipeline:
            if not hasattr(proc, 'name'):
                continue
            if name in disable:
                continue
            if not hasattr(proc, 'to_disk'):
                continue
            serializers[name] = lambda p, proc=proc: proc.to_disk(p, vocab=False)
        serializers['vocab'] = lambda p: self.vocab.to_disk(p)
        util.to_disk(path, serializers, {p: False for p in disable})

    def from_disk(self, path, disable=tuple()):
        """Loads state from a directory. Modifies the object in place and
        returns it. If the saved `Language` object contains a model, the
        model will be loaded.

        path (unicode or Path): A path to a directory. Paths may be either
            strings or `Path`-like objects.
        disable (list): Names of the pipeline components to disable.
        RETURNS (Language): The modified `Language` object.

        EXAMPLE:
            >>> from spacy.language import Language
            >>> nlp = Language().from_disk('/path/to/models')
        """
        path = util.ensure_path(path)
        deserializers = OrderedDict((
            ('vocab', lambda p: self.vocab.from_disk(p)),
            ('tokenizer', lambda p: self.tokenizer.from_disk(p, vocab=False)),
            ('meta.json', lambda p: self.meta.update(util.read_json(p)))
        ))
        for name, proc in self.pipeline:
            if name in disable:
                continue
            if not hasattr(proc, 'to_disk'):
                continue
            deserializers[name] = lambda p, proc=proc: proc.from_disk(p, vocab=False)
        exclude = {p: False for p in disable}
        if not (path / 'vocab').exists():
            exclude['vocab'] = True
        util.from_disk(path, deserializers, exclude)
        self._path = path
        return self

    def to_bytes(self, disable=[], **exclude):
        """Serialize the current state to a binary string.

        disable (list): Names of pipeline components to disable and prevent
            from being serialized.
        RETURNS (bytes): The serialized form of the `Language` object.
        """
        serializers = OrderedDict((
            ('vocab', lambda: self.vocab.to_bytes()),
            ('tokenizer', lambda: self.tokenizer.to_bytes(vocab=False)),
            ('meta', lambda: json_dumps(self.meta))
        ))
        for i, (name, proc) in enumerate(self.pipeline):
            if name in disable:
                continue
            if not hasattr(proc, 'to_bytes'):
                continue
            serializers[i] = lambda proc=proc: proc.to_bytes(vocab=False)
        return util.to_bytes(serializers, exclude)

    def from_bytes(self, bytes_data, disable=[]):
        """Load state from a binary string.

        bytes_data (bytes): The data to load from.
        disable (list): Names of the pipeline components to disable.
        RETURNS (Language): The `Language` object.
        """
        deserializers = OrderedDict((
            ('vocab', lambda b: self.vocab.from_bytes(b)),
            ('tokenizer', lambda b: self.tokenizer.from_bytes(b, vocab=False)),
            ('meta', lambda b: self.meta.update(ujson.loads(b)))
        ))
        for i, (name, proc) in enumerate(self.pipeline):
            if name in disable:
                continue
            if not hasattr(proc, 'from_bytes'):
                continue
            deserializers[i] = lambda b, proc=proc: proc.from_bytes(b, vocab=False)
        msg = util.from_bytes(bytes_data, deserializers, {})
        return self
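
    # Usage sketch for the bytes API. Note that the loop above only
    # deserializes components already present in the pipeline, so the target
    # instance needs matching add_pipe() calls before from_bytes():
    #
    #     data = nlp.to_bytes()
    #     nlp2 = Language()            # plus the same add_pipe() calls as nlp
    #     nlp2.from_bytes(data)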


class DisabledPipes(list):
    """Manager for temporary pipeline disabling."""
    def __init__(self, nlp, *names):
        self.nlp = nlp
        self.names = names
        # Important! Not deep copy -- we just want the container (but we also
        # want to support people providing arbitrarily typed nlp.pipeline
        # objects.)
        self.original_pipeline = copy(nlp.pipeline)
        list.__init__(self)
        self.extend(nlp.remove_pipe(name) for name in names)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.restore()

    def restore(self):
        '''Restore the pipeline to its state when DisabledPipes was created.'''
        current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
        unexpected = [name for name, pipe in current
                      if not self.nlp.has_pipe(name)]
        if unexpected:
            # Don't change the pipeline if we're raising an error.
            self.nlp.pipeline = current
            msg = (
                "Some current components would be lost when restoring "
                "previous pipeline state. If you added components after "
                "calling nlp.disable_pipes(), you should remove them "
                "explicitly with nlp.remove_pipe() before the pipeline is "
                "restored. Names of the new components: %s"
            )
            raise ValueError(msg % unexpected)
        self[:] = []


def _pipe(func, docs):
    for doc in docs:
        doc = func(doc)
        yield doc