# cython: infer_types=True, profile=True, binding=True
from itertools import islice
import srsly
from thinc.api import Model, SequenceCategoricalCrossentropy, Config
from ..tokens.doc cimport Doc
from .tagger import Tagger
from ..language import Language
from ..errors import Errors
from ..scorer import Scorer
from ..training import validate_examples, validate_get_examples
from .. import util


default_model_config = """
[model]
@architectures = "spacy.Tagger.v1"
[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = null
width = 12
depth = 1
embed_size = 2000
window_size = 1
maxout_pieces = 2
subword_features = true
"""
DEFAULT_SENTER_MODEL = Config().from_str(default_model_config)["model"]
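# For reference, the resolved default model config can be inspected at
# runtime (a sketch, assuming the module is importable as spacy.pipeline.senter):
#
#     from spacy.pipeline.senter import DEFAULT_SENTER_MODEL
#     print(DEFAULT_SENTER_MODEL)  # the parsed [model] block from the string above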


@Language.factory(
    "senter",
    assigns=["token.is_sent_start"],
    default_config={"model": DEFAULT_SENTER_MODEL},
    default_score_weights={"sents_f": 1.0, "sents_p": 0.0, "sents_r": 0.0},
)
def make_senter(nlp: Language, name: str, model: Model):
    return SentenceRecognizer(nlp.vocab, model, name)
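# A minimal usage sketch for the factory above (assumes a blank English
# pipeline; predictions are meaningful only after training):
#
#     import spacy
#     nlp = spacy.blank("en")
#     nlp.add_pipe("senter")  # builds the component with DEFAULT_SENTER_MODEL
#     nlp.initialize()
#     doc = nlp("This is a sentence. This is another one.")
#     print([sent.text for sent in doc.sents])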


class SentenceRecognizer(Tagger):
    """Pipeline component for sentence segmentation.

    DOCS: https://nightly.spacy.io/api/sentencerecognizer
    """
    def __init__(self, vocab, model, name="senter"):
|
2020-07-27 16:11:45 +00:00
|
|
|
"""Initialize a sentence recognizer.
|
|
|
|
|
|
|
|
vocab (Vocab): The shared vocabulary.
|
|
|
|
model (thinc.api.Model): The Thinc Model powering the pipeline component.
|
|
|
|
name (str): The component instance name, used to add entries to the
|
|
|
|
losses during training.
|
|
|
|
|
2020-09-04 10:58:50 +00:00
|
|
|
DOCS: https://nightly.spacy.io/api/sentencerecognizer#init
|
2020-07-27 16:11:45 +00:00
|
|
|
"""
        self.vocab = vocab
        self.model = model
        self.name = name
        self._rehearsal_model = None
        self.cfg = {}
    @property
    def labels(self):
        """RETURNS (Tuple[str]): The labels."""
        # labels are numbered by index internally, so this matches GoldParse
        # and Example where the sentence-initial tag is 1 and other positions
        # are 0
        return tuple(["I", "S"])
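    # Worked example of the scheme (hypothetical gold values): for the tokens
    # ["Hi", "there", ".", "Bye", "."] with SENT_START values [1, 0, 0, 1, 0],
    # the corresponding tags are ["S", "I", "I", "S", "I"].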
    @property
    def label_data(self):
        return None
    def set_annotations(self, docs, batch_tag_ids):
        """Modify a batch of documents, using pre-computed scores.

        docs (Iterable[Doc]): The documents to modify.
        batch_tag_ids: The IDs to set, produced by SentenceRecognizer.predict.

        DOCS: https://nightly.spacy.io/api/sentencerecognizer#set_annotations
        """
        if isinstance(docs, Doc):
            docs = [docs]
        cdef Doc doc
        for i, doc in enumerate(docs):
            doc_tag_ids = batch_tag_ids[i]
            if hasattr(doc_tag_ids, "get"):
                # Scores on GPU are cupy arrays; .get() copies them to CPU
                doc_tag_ids = doc_tag_ids.get()
            for j, tag_id in enumerate(doc_tag_ids):
                # Don't clobber existing sentence boundaries
                if doc.c[j].sent_start == 0:
                    if tag_id == 1:
                        doc.c[j].sent_start = 1
                    else:
                        doc.c[j].sent_start = -1
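        # For reference: in the underlying token struct, sent_start == 1 marks
        # a sentence-initial token, -1 marks a non-initial token, and 0 means
        # unset, which is why only positions still at 0 are written above.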
    def get_loss(self, examples, scores):
        """Find the loss and gradient of loss for the batch of documents and
        their predicted scores.

        examples (Iterable[Example]): The batch of examples.
        scores: Scores representing the model's predictions.
        RETURNS (Tuple[float, Any]): The loss and the gradient.

        DOCS: https://nightly.spacy.io/api/sentencerecognizer#get_loss
        """
        validate_examples(examples, "SentenceRecognizer.get_loss")
        labels = self.labels
        loss_func = SequenceCategoricalCrossentropy(names=labels, normalize=False)
        truths = []
        for eg in examples:
            eg_truth = []
            for x in eg.get_aligned("SENT_START"):
                if x is None:
                    eg_truth.append(None)
                elif x == 1:
                    eg_truth.append(labels[1])
                else:
                    # anything other than 1 (0, -1, or -1 stored as uint64)
                    # is not sentence-initial
                    eg_truth.append(labels[0])
            truths.append(eg_truth)
        d_scores, loss = loss_func(scores, truths)
        if self.model.ops.xp.isnan(loss):
            raise ValueError(Errors.E910.format(name=self.name))
        return float(loss), d_scores
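        # Worked example of the alignment above (hypothetical values): gold
        # SENT_START values [1, 0, None, -1] yield truths ["S", "I", None, "I"];
        # a None entry marks a token with no gold alignment and is treated as
        # a missing label by the loss.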
    def initialize(self, get_examples, *, nlp=None):
        """Initialize the pipe for training, using a representative set
        of data examples.

        get_examples (Callable[[], Iterable[Example]]): Function that
            returns a representative sample of gold-standard Example objects.
        nlp (Language): The current nlp object the component is part of.

        DOCS: https://nightly.spacy.io/api/sentencerecognizer#initialize
        """
        validate_get_examples(get_examples, "SentenceRecognizer.initialize")
        doc_sample = []
        label_sample = []
        assert self.labels, Errors.E924.format(name=self.name)
        for example in islice(get_examples(), 10):
            doc_sample.append(example.x)
            gold_tags = example.get_aligned("SENT_START")
            # Build per-token label rows; the sample is used to infer the
            # model's output dimensions
            gold_array = [
                [1.0 if tag == gold_tag else 0.0 for tag in self.labels]
                for gold_tag in gold_tags
            ]
            label_sample.append(self.model.ops.asarray(gold_array, dtype="float32"))
        assert len(doc_sample) > 0, Errors.E923.format(name=self.name)
        assert len(label_sample) > 0, Errors.E923.format(name=self.name)
        self.model.initialize(X=doc_sample, Y=label_sample)
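        # In normal use this is reached via the pipeline, e.g. (a sketch,
        # assuming `examples` is a list of training Example objects):
        #
        #     nlp.initialize(get_examples=lambda: examples)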
    def add_label(self, label, values=None):
        # The label scheme is fixed ("I"/"S"), so new labels can't be added
        raise NotImplementedError
    def score(self, examples, **kwargs):
        """Score a batch of examples.

        examples (Iterable[Example]): The examples to score.
        RETURNS (Dict[str, Any]): The scores, produced by Scorer.score_spans.

        DOCS: https://nightly.spacy.io/api/sentencerecognizer#score
        """
        def has_sents(doc):
            return doc.has_annotation("SENT_START")

        validate_examples(examples, "SentenceRecognizer.score")
        results = Scorer.score_spans(examples, "sents", has_annotation=has_sents, **kwargs)
        # Sentence spans are unlabeled, so the per-type breakdown is not informative
        del results["sents_per_type"]
        return results
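        # A minimal scoring sketch (assumes a trained pipeline `nlp` and a
        # list of Example objects `examples`):
        #
        #     scores = nlp.evaluate(examples)
        #     print(scores["sents_f"], scores["sents_p"], scores["sents_r"])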