spaCy/spacy/pipeline/trainable_pipe.pyx

# cython: infer_types=True, binding=True
from typing import Callable, Dict, Iterable, Iterator, Optional, Tuple

import srsly
from thinc.api import Model, Optimizer, set_dropout_rate

from ..tokens.doc cimport Doc

from .. import util
from ..errors import Errors
from ..language import Language
from ..training import Example, validate_examples
from ..vocab import Vocab
from .pipe import Pipe, deserialize_config


cdef class TrainablePipe(Pipe):
    """This class is a base class and not instantiated directly. Trainable
    pipeline components like the EntityRecognizer or TextCategorizer inherit
    from it and it defines the interface that components should follow to
    function as trainable components in a spaCy pipeline.

    DOCS: https://spacy.io/api/pipe
    """

    def __init__(self, vocab: Vocab, model: Model, name: str, **cfg):
        """Initialize a pipeline component.

        vocab (Vocab): The shared vocabulary.
        model (thinc.api.Model): The Thinc Model powering the pipeline component.
        name (str): The component instance name.
        **cfg: Additional settings and config parameters.

        DOCS: https://spacy.io/api/pipe#init
        """
        self.vocab = vocab
        self.model = model
        self.name = name
        self.cfg = dict(cfg)

    def __call__(self, Doc doc) -> Doc:
        """Apply the pipe to one document. The document is modified in place
        and returned. This usually happens under the hood when the nlp object
        is called on a text and all components are applied to the Doc.

        doc (Doc): The Doc to process.
        RETURNS (Doc): The processed Doc.

        DOCS: https://spacy.io/api/pipe#call
        """
        error_handler = self.get_error_handler()
        try:
            scores = self.predict([doc])
            self.set_annotations([doc], scores)
            return doc
        except Exception as e:
            error_handler(self.name, self, [doc], e)

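    # Usage sketch for __call__ (assumes an `nlp` pipeline; the component
    # name "tagger" is illustrative, any trainable component works the same):
    #
    #     doc = nlp.make_doc("I like eggs and cheese.")
    #     tagger = nlp.get_pipe("tagger")
    #     doc = tagger(doc)  # predict() + set_annotations() on a single Doc
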
    def pipe(self, stream: Iterable[Doc], *, batch_size: int = 128) -> Iterator[Doc]:
        """Apply the pipe to a stream of documents. This usually happens under
        the hood when the nlp object is called on a text and all components are
        applied to the Doc.

        stream (Iterable[Doc]): A stream of documents.
        batch_size (int): The number of documents to buffer.
        error_handler (Callable[[str, List[Doc], Exception], Any]): Function that
            deals with a failing batch of documents. The default function just
            reraises the exception.

        YIELDS (Doc): Processed documents in order.

        DOCS: https://spacy.io/api/pipe#pipe
        """
        error_handler = self.get_error_handler()
        for docs in util.minibatch(stream, size=batch_size):
            try:
                scores = self.predict(docs)
                self.set_annotations(docs, scores)
                yield from docs
            except Exception as e:
                error_handler(self.name, self, docs, e)

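    # Streaming sketch for pipe() (illustrative names; `texts` is any
    # iterable of strings):
    #
    #     docs = (nlp.make_doc(text) for text in texts)
    #     for doc in nlp.get_pipe("tagger").pipe(docs, batch_size=64):
    #         ...  # docs come back annotated, in order, one batch at a time
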
    def predict(self, docs: Iterable[Doc]):
        """Apply the pipeline's model to a batch of docs, without modifying them.
        Returns a single tensor for a batch of documents.

        docs (Iterable[Doc]): The documents to predict.
        RETURNS: Vector representations of the predictions.

        DOCS: https://spacy.io/api/pipe#predict
        """
        raise NotImplementedError(Errors.E931.format(parent="TrainablePipe", method="predict", name=self.name))

    def set_annotations(self, docs: Iterable[Doc], scores):
        """Modify a batch of documents, using pre-computed scores.

        docs (Iterable[Doc]): The documents to modify.
        scores: The scores to assign.

        DOCS: https://spacy.io/api/pipe#set_annotations
        """
        raise NotImplementedError(Errors.E931.format(parent="TrainablePipe", method="set_annotations", name=self.name))

    def update(self,
               examples: Iterable["Example"],
               *,
               drop: float = 0.0,
               sgd: Optimizer = None,
               losses: Optional[Dict[str, float]] = None) -> Dict[str, float]:
        """Learn from a batch of documents and gold-standard information,
Revert "Set annotations in update" (#6810) * Revert "Set annotations in update (#6767)" This reverts commit e680efc7cc365a31c1c7f9d5eb8733c1e61e558d. * Fix version * Update spacy/pipeline/entity_linker.py * Update spacy/pipeline/entity_linker.py * Update spacy/pipeline/tagger.pyx * Update spacy/pipeline/tok2vec.py * Update spacy/pipeline/tok2vec.py * Update spacy/pipeline/transition_parser.pyx * Update spacy/pipeline/transition_parser.pyx * Update website/docs/api/multilabel_textcategorizer.md * Update website/docs/api/tok2vec.md * Update website/docs/usage/layers-architectures.md * Update website/docs/usage/layers-architectures.md * Update website/docs/api/transformer.md * Update website/docs/api/textcategorizer.md * Update website/docs/api/tagger.md * Update spacy/pipeline/entity_linker.py * Update website/docs/api/sentencerecognizer.md * Update website/docs/api/pipe.md * Update website/docs/api/morphologizer.md * Update website/docs/api/entityrecognizer.md * Update spacy/pipeline/entity_linker.py * Update spacy/pipeline/multitask.pyx * Update spacy/pipeline/tagger.pyx * Update spacy/pipeline/tagger.pyx * Update spacy/pipeline/textcat.py * Update spacy/pipeline/textcat.py * Update spacy/pipeline/textcat.py * Update spacy/pipeline/tok2vec.py * Update spacy/pipeline/trainable_pipe.pyx * Update spacy/pipeline/trainable_pipe.pyx * Update spacy/pipeline/transition_parser.pyx * Update spacy/pipeline/transition_parser.pyx * Update website/docs/api/entitylinker.md * Update website/docs/api/dependencyparser.md * Update spacy/pipeline/trainable_pipe.pyx
2021-01-25 14:18:45 +00:00
        updating the pipe's model. Delegates to predict and get_loss.

        examples (Iterable[Example]): A batch of Example objects.
        drop (float): The dropout rate.
        sgd (thinc.api.Optimizer): The optimizer.
        losses (Dict[str, float]): Optional record of the loss during training.
            Updated using the component name as the key.
        RETURNS (Dict[str, float]): The updated losses dictionary.

        DOCS: https://spacy.io/api/pipe#update
        """
        if losses is None:
            losses = {}
        if not hasattr(self, "model") or self.model in (None, True, False):
            return losses
        losses.setdefault(self.name, 0.0)
        validate_examples(examples, "TrainablePipe.update")
        if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples):
            # Handle cases where there are no tokens in any docs.
            return losses
        set_dropout_rate(self.model, drop)
        scores, bp_scores = self.model.begin_update([eg.predicted for eg in examples])
        loss, d_scores = self.get_loss(examples, scores)
        bp_scores(d_scores)
        if sgd not in (None, False):
            self.finish_update(sgd)
        losses[self.name] += loss
        return losses

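    # Training-loop sketch around update() (assumes `train_examples` is a
    # list of Example objects; the batch size is illustrative):
    #
    #     optimizer = nlp.initialize(lambda: train_examples)
    #     losses = {}
    #     for batch in util.minibatch(train_examples, size=8):
    #         nlp.update(batch, sgd=optimizer, losses=losses)
    #     print(losses)  # summed loss per component name
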
    def rehearse(self,
                 examples: Iterable[Example],
                 *,
                 sgd: Optimizer = None,
                 losses: Dict[str, float] = None,
                 **config) -> Dict[str, float]:
        """Perform a "rehearsal" update from a batch of data. Rehearsal updates
        teach the current model to make predictions similar to an initial model,
        to try to address the "catastrophic forgetting" problem. This feature is
        experimental.

        examples (Iterable[Example]): A batch of Example objects.
        sgd (thinc.api.Optimizer): The optimizer.
        losses (Dict[str, float]): Optional record of the loss during training.
            Updated using the component name as the key.
        RETURNS (Dict[str, float]): The updated losses dictionary.

        DOCS: https://spacy.io/api/pipe#rehearse
        """
        pass

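    # Rehearsal sketch (experimental; assumes `raw_examples` built from
    # unlabelled text, e.g. Example.from_dict(nlp.make_doc(text), {})):
    #
    #     optimizer = nlp.resume_training()
    #     losses = nlp.rehearse(raw_examples, sgd=optimizer)
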
    def get_loss(self, examples: Iterable[Example], scores) -> Tuple[float, float]:
        """Find the loss and gradient of loss for the batch of documents and
        their predicted scores.

        examples (Iterable[Example]): The batch of examples.
        scores: Scores representing the model's predictions.
        RETURNS (Tuple[float, float]): The loss and the gradient.

        DOCS: https://spacy.io/api/pipe#get_loss
        """
        raise NotImplementedError(Errors.E931.format(parent="TrainablePipe", method="get_loss", name=self.name))

    def create_optimizer(self) -> Optimizer:
        """Create an optimizer for the pipeline component.

        RETURNS (thinc.api.Optimizer): The optimizer.

        DOCS: https://spacy.io/api/pipe#create_optimizer
        """
        return util.create_default_optimizer()

    def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language = None):
        """Initialize the pipe for training, using data examples if available.
        This method needs to be implemented by each TrainablePipe component,
        ensuring the internal model (if available) is initialized properly
        using the provided sample of Example objects.

        get_examples (Callable[[], Iterable[Example]]): Function that
            returns a representative sample of gold-standard Example objects.
        nlp (Language): The current nlp object the component is part of.

        DOCS: https://spacy.io/api/pipe#initialize
        """
        raise NotImplementedError(Errors.E931.format(parent="TrainablePipe", method="initialize", name=self.name))

    def add_label(self, label: str) -> int:
        """Add an output label.

        For TrainablePipe components, it is possible to
        extend pretrained models with new labels, but care should be taken to
        avoid the "catastrophic forgetting" problem.

        label (str): The label to add.
        RETURNS (int): 0 if label is already present, otherwise 1.

        DOCS: https://spacy.io/api/pipe#add_label
        """
        raise NotImplementedError(Errors.E931.format(parent="TrainablePipe", method="add_label", name=self.name))

    @property
    def is_trainable(self) -> bool:
        return True

    @property
    def is_resizable(self) -> bool:
        return getattr(self, "model", None) and "resize_output" in self.model.attrs

    def _allow_extra_label(self) -> None:
        """Raise an error if the component can not add any more labels."""
        nO = None
        if self.model.has_dim("nO"):
            nO = self.model.get_dim("nO")
        elif self.model.has_ref("output_layer") and self.model.get_ref("output_layer").has_dim("nO"):
            nO = self.model.get_ref("output_layer").get_dim("nO")
        if nO is not None and nO == len(self.labels):
            if not self.is_resizable:
                # Report the nO found above: the model itself may not expose
                # an "nO" dim when the output size lives on the output_layer.
                raise ValueError(Errors.E922.format(name=self.name, nO=nO))

    def set_output(self, nO: int) -> None:
        if self.is_resizable:
            self.model.attrs["resize_output"](self.model, nO)
        else:
            raise NotImplementedError(Errors.E921)

    def use_params(self, params: dict):
        """Modify the pipe's model, to use the given parameter values. At the
        end of the context, the original parameters are restored.

        params (dict): The parameter values to use in the model.

        DOCS: https://spacy.io/api/pipe#use_params
        """
        with self.model.use_params(params):
            yield

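    # use_params sketch: temporarily swap in averaged weights, e.g. to save
    # an averaged model during training (assumes a Thinc optimizer with
    # parameter averaging enabled; the path is hypothetical):
    #
    #     with nlp.use_params(optimizer.averages):
    #         nlp.to_disk("/output/model-averaged")
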
    def finish_update(self, sgd: Optimizer) -> None:
        """Update parameters using the current parameter gradients.
        The Optimizer instance contains the functionality to perform
        the stochastic gradient descent.

        sgd (thinc.api.Optimizer): The optimizer.

        DOCS: https://spacy.io/api/pipe#finish_update
        """
        self.model.finish_update(sgd)

    def _validate_serialization_attrs(self):
        """Check that the pipe implements the required attributes. If a subclass
        implements a custom __init__ method but doesn't set these attributes,
        they currently default to None, so we need to perform additional checks.
"""
if not hasattr(self, "vocab") or self.vocab is None:
raise ValueError(Errors.E899.format(name=util.get_object_name(self)))
if not hasattr(self, "model") or self.model is None:
raise ValueError(Errors.E898.format(name=util.get_object_name(self)))
    def to_bytes(self, *, exclude=tuple()):
        """Serialize the pipe to a bytestring.

        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (bytes): The serialized object.

        DOCS: https://spacy.io/api/pipe#to_bytes
        """
        self._validate_serialization_attrs()
        serialize = {}
if hasattr(self, "cfg") and self.cfg is not None:
serialize["cfg"] = lambda: srsly.json_dumps(self.cfg)
serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude)
serialize["model"] = self.model.to_bytes
return util.to_bytes(serialize, exclude)
    def from_bytes(self, bytes_data, *, exclude=tuple()):
        """Load the pipe from a bytestring.

        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (TrainablePipe): The loaded object.

        DOCS: https://spacy.io/api/pipe#from_bytes
        """
        self._validate_serialization_attrs()

        def load_model(b):
            try:
                self.model.from_bytes(b)
            except AttributeError:
                raise ValueError(Errors.E149) from None

        deserialize = {}
if hasattr(self, "cfg") and self.cfg is not None:
deserialize["cfg"] = lambda b: self.cfg.update(srsly.json_loads(b))
deserialize["vocab"] = lambda b: self.vocab.from_bytes(b, exclude=exclude)
deserialize["model"] = load_model
util.from_bytes(bytes_data, deserialize, exclude)
return self
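    # Bytes round-trip sketch (the "ner" component name is illustrative):
    #
    #     pipe = nlp.get_pipe("ner")
    #     pipe_bytes = pipe.to_bytes()
    #     pipe.from_bytes(pipe_bytes)
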
    def to_disk(self, path, *, exclude=tuple()):
        """Serialize the pipe to disk.

        path (str / Path): Path to a directory.
        exclude (Iterable[str]): String names of serialization fields to exclude.

        DOCS: https://spacy.io/api/pipe#to_disk
        """
        self._validate_serialization_attrs()
        serialize = {}
if hasattr(self, "cfg") and self.cfg is not None:
serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg)
serialize["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
serialize["model"] = lambda p: self.model.to_disk(p)
util.to_disk(path, serialize, exclude)
    def from_disk(self, path, *, exclude=tuple()):
        """Load the pipe from disk.

        path (str / Path): Path to a directory.
        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (TrainablePipe): The loaded object.

        DOCS: https://spacy.io/api/pipe#from_disk
        """
        self._validate_serialization_attrs()

        def load_model(p):
            try:
                with open(p, "rb") as mfile:
                    self.model.from_bytes(mfile.read())
            except AttributeError:
                raise ValueError(Errors.E149) from None

        deserialize = {}
if hasattr(self, "cfg") and self.cfg is not None:
deserialize["cfg"] = lambda p: self.cfg.update(deserialize_config(p))
deserialize["vocab"] = lambda p: self.vocab.from_disk(p, exclude=exclude)
deserialize["model"] = load_model
util.from_disk(path, deserialize, exclude)
return self
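
    # Disk round-trip sketch (the path and component name are hypothetical):
    #
    #     pipe = nlp.get_pipe("ner")
    #     pipe.to_disk("/path/to/ner")
    #     pipe.from_disk("/path/to/ner")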