import random
from itertools import islice
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Union

import srsly
from thinc.api import Config, CosineDistance, Model, Optimizer, set_dropout_rate
from thinc.types import Floats2d

from .. import util
from ..errors import Errors
from ..kb import Candidate, KnowledgeBase
from ..language import Language
from ..scorer import Scorer
from ..tokens import Doc, Span
from ..training import Example, validate_examples, validate_get_examples
from ..util import SimpleFrozenList, registry
from ..vocab import Vocab
from .legacy.entity_linker import EntityLinker_v1
from .pipe import deserialize_config
from .trainable_pipe import TrainablePipe

# See #9050
BACKWARD_OVERWRITE = True

default_model_config = """
[model]
@architectures = "spacy.EntityLinker.v2"

[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v2"
pretrained_vectors = null
width = 96
depth = 2
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true
"""
DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
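
# Illustrative usage sketch (assumed setup, not part of this module): the
# default config above is what the factory below receives, so individual
# settings can be overridden when adding the component, e.g.:
#
#     import spacy
#     nlp = spacy.blank("en")
#     nlp.add_pipe("entity_linker", config={"n_sents": 2, "incl_prior": False})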


@Language.factory(
    "entity_linker",
    requires=["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"],
    assigns=["token.ent_kb_id"],
    default_config={
        "model": DEFAULT_NEL_MODEL,
        "labels_discard": [],
        "n_sents": 0,
        "incl_prior": True,
        "incl_context": True,
        "entity_vector_length": 64,
        "get_candidates": {"@misc": "spacy.CandidateGenerator.v1"},
        "get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"},
        "generate_empty_kb": {"@misc": "spacy.EmptyKB.v2"},
        "overwrite": True,
        "scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
        "use_gold_ents": True,
        "candidates_batch_size": 1,
        "threshold": None,
    },
    default_score_weights={
        "nel_micro_f": 1.0,
        "nel_micro_r": None,
        "nel_micro_p": None,
    },
)
def make_entity_linker(
    nlp: Language,
    name: str,
    model: Model,
    *,
    labels_discard: Iterable[str],
    n_sents: int,
    incl_prior: bool,
    incl_context: bool,
    entity_vector_length: int,
    get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
    get_candidates_batch: Callable[
        [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
    ],
    generate_empty_kb: Callable[[Vocab, int], KnowledgeBase],
    overwrite: bool,
    scorer: Optional[Callable],
    use_gold_ents: bool,
    candidates_batch_size: int,
    threshold: Optional[float] = None,
):
    """Construct an EntityLinker component.

    model (Model[List[Doc], Floats2d]): A model that learns document vector
        representations. Given a batch of Doc objects, it should return a single
        array, with one row per item in the batch.
    labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction.
    n_sents (int): The number of neighbouring sentences to take into account.
    incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
    incl_context (bool): Whether or not to include the local context in the model.
    entity_vector_length (int): Size of encoding vectors in the KB.
    get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
        produces a list of candidates, given a certain knowledge base and a textual mention.
    get_candidates_batch (Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]):
        Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
    generate_empty_kb (Callable[[Vocab, int], KnowledgeBase]): Callable returning an empty KnowledgeBase.
    overwrite (bool): Whether to overwrite existing KB identifiers set by an earlier component.
    scorer (Optional[Callable]): The scoring method.
    use_gold_ents (bool): Whether to copy entities from gold docs during training or not. If false, another
        component must provide entity annotations.
    candidates_batch_size (int): Size of batches for entity candidate generation.
    threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold,
        prediction is discarded. If None, predictions are not filtered by any threshold.
    """
    if not model.attrs.get("include_span_maker", False):
        # The only difference in arguments here is that use_gold_ents and threshold aren't available.
        return EntityLinker_v1(
            nlp.vocab,
            model,
            name,
            labels_discard=labels_discard,
            n_sents=n_sents,
            incl_prior=incl_prior,
            incl_context=incl_context,
            entity_vector_length=entity_vector_length,
            get_candidates=get_candidates,
            overwrite=overwrite,
            scorer=scorer,
        )
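    # Reaching this point means the model architecture includes a span maker,
    # so the current (v2) component is built with the full argument set.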
    return EntityLinker(
        nlp.vocab,
        model,
        name,
        labels_discard=labels_discard,
        n_sents=n_sents,
        incl_prior=incl_prior,
        incl_context=incl_context,
        entity_vector_length=entity_vector_length,
        get_candidates=get_candidates,
        get_candidates_batch=get_candidates_batch,
        generate_empty_kb=generate_empty_kb,
        overwrite=overwrite,
        scorer=scorer,
        use_gold_ents=use_gold_ents,
        candidates_batch_size=candidates_batch_size,
        threshold=threshold,
    )
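
# Illustrative sketch (assumed registry name, not part of this module): a custom
# candidate generator can be swapped in for "spacy.CandidateGenerator.v1" via
# the registry, e.g.:
#
#     @registry.misc("my_candidate_generator.v1")
#     def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]:
#         def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]:
#             return kb.get_alias_candidates(mention.text)
#         return get_candidates
#
# and referenced from the config as {"@misc": "my_candidate_generator.v1"}.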


def entity_linker_score(examples, **kwargs):
    return Scorer.score_links(examples, negative_labels=[EntityLinker.NIL], **kwargs)


@registry.scorers("spacy.entity_linker_scorer.v1")
def make_entity_linker_scorer():
    return entity_linker_score
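
# "spacy.entity_linker_scorer.v1" is the @scorers entry referenced by the
# factory's default config above, so by default the component scores with
# Scorer.score_links.
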
class EntityLinker(TrainablePipe):
    """Pipeline component for named entity linking.

    DOCS: https://spacy.io/api/entitylinker
    """

    NIL = "NIL"  # string used to refer to a non-existing link

    def __init__(
        self,
        vocab: Vocab,
        model: Model,
        name: str = "entity_linker",
        *,
        labels_discard: Iterable[str],
        n_sents: int,
        incl_prior: bool,
        incl_context: bool,
        entity_vector_length: int,
        get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
        get_candidates_batch: Callable[
            [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
        ],
        generate_empty_kb: Callable[[Vocab, int], KnowledgeBase],
        overwrite: bool = BACKWARD_OVERWRITE,
        scorer: Optional[Callable] = entity_linker_score,
        use_gold_ents: bool,
        candidates_batch_size: int,
        threshold: Optional[float] = None,
    ) -> None:
        """Initialize an entity linker.

        vocab (Vocab): The shared vocabulary.
        model (thinc.api.Model): The Thinc Model powering the pipeline component.
        name (str): The component instance name, used to add entries to the
            losses during training.
        labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction.
        n_sents (int): The number of neighbouring sentences to take into account.
        incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
        incl_context (bool): Whether or not to include the local context in the model.
        entity_vector_length (int): Size of encoding vectors in the KB.
        get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
            produces a list of candidates, given a certain knowledge base and a textual mention.
        get_candidates_batch (Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]):
            Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
        generate_empty_kb (Callable[[Vocab, int], KnowledgeBase]): Callable returning an empty KnowledgeBase.
        overwrite (bool): Whether to overwrite existing KB identifiers set by an earlier component.
        scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
        use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
            component must provide entity annotations.
        candidates_batch_size (int): Size of batches for entity candidate generation.
        threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the
            threshold, prediction is discarded. If None, predictions are not filtered by any threshold.

        DOCS: https://spacy.io/api/entitylinker#init
        """
        if threshold is not None and not (0 <= threshold <= 1):
            raise ValueError(
                Errors.E1043.format(
                    range_start=0,
                    range_end=1,
                    value=threshold,
                )
            )

        self.vocab = vocab
        self.model = model
        self.name = name
        self.labels_discard = list(labels_discard)
        # how many neighbour sentences to take into account
        self.n_sents = n_sents
        self.incl_prior = incl_prior
        self.incl_context = incl_context
        self.get_candidates = get_candidates
        self.get_candidates_batch = get_candidates_batch
        self.cfg: Dict[str, Any] = {"overwrite": overwrite}
        self.distance = CosineDistance(normalize=False)
        self.kb = generate_empty_kb(self.vocab, entity_vector_length)
        self.use_gold_ents = use_gold_ents
        self.candidates_batch_size = candidates_batch_size
        self.threshold = threshold

        if candidates_batch_size < 1:
            raise ValueError(Errors.E1044)

        def _score_with_ents_set(examples: Iterable[Example], **kwargs):
            # Because of how spaCy works, we can't just score immediately, because Language.evaluate
            # calls pipe() on the predicted docs, which won't have entities if there is no NER in the pipeline.
            if not scorer:
                return scorer
            if not self.use_gold_ents:
                return scorer(examples, **kwargs)
            else:
                examples = self._ensure_ents(examples)
                docs = self.pipe(
                    (eg.predicted for eg in examples),
                )
                for eg, doc in zip(examples, docs):
                    eg.predicted = doc
                return scorer(examples, **kwargs)

        self.scorer = _score_with_ents_set

    def _ensure_ents(self, examples: Iterable[Example]) -> Iterable[Example]:
        """If use_gold_ents is true, copy the aligned gold entities onto (a copy of) eg.predicted."""
        if not self.use_gold_ents:
            return examples

        new_examples = []
        for eg in examples:
            ents, _ = eg.get_aligned_ents_and_ner()
            new_eg = eg.copy()
            new_eg.predicted.ents = ents
            new_examples.append(new_eg)
        return new_examples

    def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
        """Define the KB of this pipe by providing a function that will
        create it using this object's vocab."""
        if not callable(kb_loader):
            raise ValueError(Errors.E885.format(arg_type=type(kb_loader)))

        self.kb = kb_loader(self.vocab)  # type: ignore

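    # Illustrative loader sketch (assumed names and path, not part of this
    # module): a KB saved with `kb.to_disk(...)` can be plugged back in during
    # `initialize` below, e.g.:
    #
    #     def kb_loader(vocab: Vocab) -> KnowledgeBase:
    #         kb = InMemoryLookupKB(vocab, entity_vector_length=64)
    #         kb.from_disk(kb_path)  # kb_path is assumed to point at a saved KB
    #         return kb
    #
    #     entity_linker.initialize(lambda: examples, kb_loader=kb_loader)
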
    def validate_kb(self) -> None:
        # Raise an error if the knowledge base is not initialized.
        if self.kb is None:
            raise ValueError(Errors.E1018.format(name=self.name))
        if hasattr(self.kb, "is_empty") and self.kb.is_empty():
            raise ValueError(Errors.E139.format(name=self.name))

    def initialize(
        self,
        get_examples: Callable[[], Iterable[Example]],
        *,
        nlp: Optional[Language] = None,
        kb_loader: Optional[Callable[[Vocab], KnowledgeBase]] = None,
    ):
        """Initialize the pipe for training, using a representative set
        of data examples.

        get_examples (Callable[[], Iterable[Example]]): Function that
            returns a representative sample of gold-standard Example objects.
        nlp (Language): The current nlp object the component is part of.
        kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab
            instance. Note that providing this argument will overwrite all data accumulated in the current KB.
            Use this only when loading a KB as a whole from file.

        DOCS: https://spacy.io/api/entitylinker#initialize
        """
|
2020-10-08 19:33:49 +00:00
|
|
|
validate_get_examples(get_examples, "EntityLinker.initialize")
|
2020-10-07 12:56:00 +00:00
|
|
|
if kb_loader is not None:
|
2020-10-08 08:34:01 +00:00
|
|
|
self.set_kb(kb_loader)
|
2020-10-07 12:56:00 +00:00
|
|
|
self.validate_kb()
|
2020-07-22 11:42:59 +00:00
|
|
|
nO = self.kb.entity_vector_length
|
2020-09-08 20:44:25 +00:00
|
|
|
doc_sample = []
|
|
|
|
vector_sample = []
|
2024-04-16 10:00:22 +00:00
|
|
|
examples = self._ensure_ents(islice(get_examples(), 10))
|
|
|
|
for eg in examples:
|
2022-05-23 18:42:26 +00:00
|
|
|
doc = eg.x
|
Fix entity linker batching (#9669)
* Partial fix of entity linker batching
* Add import
* Better name
* Add `use_gold_ents` option, docs
* Change to v2, create stub v1, update docs etc.
* Fix error type
Honestly no idea what the right type to use here is.
ConfigValidationError seems wrong. Maybe a NotImplementedError?
* Make mypy happy
* Add hacky fix for init issue
* Add legacy pipeline entity linker
* Fix references to class name
* Add __init__.py for legacy
* Attempted fix for loss issue
* Remove placeholder V1
* formatting
* slightly more interesting train data
* Handle batches with no usable examples
This adds a test for batches that have docs but not entities, and a
check in the component that detects such cases and skips the update step
as thought the batch were empty.
* Remove todo about data verification
Check for empty data was moved further up so this should be OK now - the
case in question shouldn't be possible.
* Fix gradient calculation
The model doesn't know which entities are not in the kb, so it generates
embeddings for the context of all of them.
However, the loss does know which entities aren't in the kb, and it
ignores them, as there's no sensible gradient.
This has the issue that the gradient will not be calculated for some of
the input embeddings, which causes a dimension mismatch in backprop.
That should have caused a clear error, but with numpyops it was causing
nans to happen, which is another problem that should be addressed
separately.
This commit changes the loss to give a zero gradient for entities not in
the kb.
* add failing test for v1 EL legacy architecture
* Add nasty but simple working check for legacy arch
* Clarify why init hack works the way it does
* Clarify use_gold_ents use case
* Fix use gold ents related handling
* Add tests for no gold ents and fix other tests
* Use aligned ents function (not working)
This doesn't actually work because the "aligned" ents are gold-only. But
if I have a different function that returns the intersection, *then*
this will work as desired.
* Use proper matching ent check
This changes the process when gold ents are not used so that the
intersection of ents in the pred and gold is used.
* Move get_matching_ents to Example
* Use model attribute to check for legacy arch
* Rename flag
* bump spacy-legacy to lower 3.0.9
Co-authored-by: svlandeg <svlandeg@github.com>
2022-03-04 08:17:36 +00:00
|
|
|
            doc_sample.append(doc)
            vector_sample.append(self.model.ops.alloc1f(nO))
        assert len(doc_sample) > 0, Errors.E923.format(name=self.name)
        assert len(vector_sample) > 0, Errors.E923.format(name=self.name)

        # XXX In order for size estimation to work, there has to be at least
        # one entity. It's not used for training so it doesn't have to be real,
        # so we add a fake one if none are present.
        # We can't use Doc.has_annotation here because it can be True for docs
        # that have been through an NER component but got no entities.
        has_annotations = any([doc.ents for doc in doc_sample])
        if not has_annotations:
            doc = doc_sample[0]
            ent = doc[0:1]
            ent.label_ = "XXX"
            doc.ents = (ent,)

        self.model.initialize(
            X=doc_sample, Y=self.model.ops.asarray(vector_sample, dtype="float32")
        )

        if not has_annotations:
            # Clean up dummy annotation
            doc.ents = []
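
    # Illustrative sketch (not part of the original file) of the dummy-entity
    # trick above, assuming a blank pipeline; any one-token span with a fake
    # label is enough for the model to infer its output shapes:
    #
    #     import spacy
    #
    #     nlp = spacy.blank("en")
    #     doc = nlp.make_doc("placeholder text")
    #     ent = doc[0:1]
    #     ent.label_ = "XXX"   # fake label, used only for size estimation
    #     doc.ents = (ent,)    # doc.ents is now non-empty
    #     ...                  # initialize the model
    #     doc.ents = []        # clean the dummy annotation up again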

    def batch_has_learnable_example(self, examples):
        """Check if a batch contains a learnable example.

        If one isn't present, then the update step needs to be skipped.
        """
        for eg in examples:
            for ent in eg.predicted.ents:
                candidates = list(self.get_candidates(self.kb, ent))
                if candidates:
                    return True
        return False
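
    # For reference, `self.get_candidates` is a callable with the signature
    # (KnowledgeBase, Span) -> Iterable[Candidate]. A custom generator could
    # be registered roughly like this (an illustrative sketch, not part of
    # this file; the registry name is hypothetical, and the body mirrors what
    # the default spaCy 3.x generator does, i.e. an alias-table lookup):
    #
    #     from spacy.util import registry
    #
    #     @registry.misc("my_candidate_generator.v1")
    #     def create_candidate_generator():
    #         def get_candidates(kb, mention):
    #             return kb.get_alias_candidates(mention.text)
    #         return get_candidates
    #
    # With no candidates anywhere in the batch, `get_loss` would have nothing
    # to compare against, so `update` short-circuits instead of running a
    # useless zero-gradient pass.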

    def update(
        self,
        examples: Iterable[Example],
        *,
        drop: float = 0.0,
        sgd: Optional[Optimizer] = None,
        losses: Optional[Dict[str, float]] = None,
    ) -> Dict[str, float]:
        """Learn from a batch of documents and gold-standard information,
        updating the pipe's model. Delegates to predict and get_loss.

        examples (Iterable[Example]): A batch of Example objects.
        drop (float): The dropout rate.
        sgd (thinc.api.Optimizer): The optimizer.
        losses (Dict[str, float]): Optional record of the loss during training.
            Updated using the component name as the key.
        RETURNS (Dict[str, float]): The updated losses dictionary.

        DOCS: https://spacy.io/api/entitylinker#update
        """
        self.validate_kb()
        if losses is None:
            losses = {}
        losses.setdefault(self.name, 0.0)
        if not examples:
            return losses
        examples = self._ensure_ents(examples)
        validate_examples(examples, "EntityLinker.update")

        # make sure we have something to learn from, if not, short-circuit
        if not self.batch_has_learnable_example(examples):
            return losses

        set_dropout_rate(self.model, drop)
        docs = [eg.predicted for eg in examples]
        sentence_encodings, bp_context = self.model.begin_update(docs)
        loss, d_scores = self.get_loss(
            sentence_encodings=sentence_encodings, examples=examples
        )
        bp_context(d_scores)
        if sgd is not None:
            self.finish_update(sgd)
        losses[self.name] += loss
        return losses
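
    # A minimal sketch (illustrative, not part of the original file) of
    # driving this component during training, assuming an `nlp` pipeline
    # whose entity_linker has a populated KB, and a `train_data` list of
    # hypothetical (text, annotations) tuples:
    #
    #     from spacy.training import Example
    #
    #     optimizer = nlp.initialize()
    #     linker = nlp.get_pipe("entity_linker")
    #     for text, annots in train_data:
    #         example = Example.from_dict(nlp.make_doc(text), annots)
    #         losses = linker.update([example], sgd=optimizer)
    #
    # In practice `nlp.update(...)` is the usual entry point; it delegates to
    # this method for each trainable pipe.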

    def get_loss(self, examples: Iterable[Example], sentence_encodings: Floats2d):
        validate_examples(examples, "EntityLinker.get_loss")
        entity_encodings = []
        # We assume that get_loss is called with gold ents set in the examples if need be
        eidx = 0  # indices in gold entities to keep
        keep_ents = []  # indices in sentence_encodings to keep

        for eg in examples:
            kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True)

            for ent in eg.get_matching_ents():
                kb_id = kb_ids[ent.start]
                if kb_id:
                    entity_encoding = self.kb.get_vector(kb_id)
                    entity_encodings.append(entity_encoding)
                    keep_ents.append(eidx)

                eidx += 1

        entity_encodings = self.model.ops.asarray2f(entity_encodings, dtype="float32")
        selected_encodings = sentence_encodings[keep_ents]

        # if there are no matches, short circuit
        if not keep_ents:
            out = self.model.ops.alloc2f(*sentence_encodings.shape)
            return 0, out

        if selected_encodings.shape != entity_encodings.shape:
            err = Errors.E147.format(
                method="get_loss", msg="gold entities do not match up"
            )
            raise RuntimeError(err)
        gradients = self.distance.get_grad(selected_encodings, entity_encodings)
        # to match the input size, we need to give a zero gradient for items not in the kb
        out = self.model.ops.alloc2f(*sentence_encodings.shape)
        out[keep_ents] = gradients

        loss = self.distance.get_loss(selected_encodings, entity_encodings)
        loss = loss / len(entity_encodings)
        return float(loss), out
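
    # Worked illustration of the zero-gradient scatter above (hypothetical
    # numbers, not from the original file): suppose a batch has 4 predicted
    # entities but only 3 of them have gold KB ids, with keep_ents == [0, 1, 3].
    # Then `gradients` has shape (3, nO) while backprop needs shape (4, nO),
    # so the row for the unlinked entity is left as a zero gradient:
    #
    #     out = self.model.ops.alloc2f(4, nO)  # all zeros
    #     out[[0, 1, 3]] = gradients           # row 2 stays zero
    #
    # The loss is likewise averaged over the 3 gold entities only.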

    def predict(self, docs: Iterable[Doc]) -> List[str]:
        """Apply the pipeline's model to a batch of docs, without modifying them.
        Returns the KB IDs for each entity in each doc, including NIL if there is
        no prediction.

        docs (Iterable[Doc]): The documents to predict.
        RETURNS (List[str]): The model's prediction for each document.

        DOCS: https://spacy.io/api/entitylinker#predict
        """
        self.validate_kb()
        entity_count = 0
        final_kb_ids: List[str] = []
        xp = self.model.ops.xp
        if not docs:
            return final_kb_ids
        if isinstance(docs, Doc):
            docs = [docs]
        for i, doc in enumerate(docs):
            if len(doc) == 0:
                continue
            sentences = [s for s in doc.sents]

            # Loop over entities in batches.
            for ent_idx in range(0, len(doc.ents), self.candidates_batch_size):
                ent_batch = doc.ents[ent_idx : ent_idx + self.candidates_batch_size]

                # Look up candidate entities.
                valid_ent_idx = [
                    idx
                    for idx in range(len(ent_batch))
                    if ent_batch[idx].label_ not in self.labels_discard
                ]

                batch_candidates = list(
                    self.get_candidates_batch(
                        self.kb, [ent_batch[idx] for idx in valid_ent_idx]
                    )
                    if self.candidates_batch_size > 1
                    else [
                        self.get_candidates(self.kb, ent_batch[idx])
                        for idx in valid_ent_idx
                    ]
                )

                # Looping through each entity in batch (TODO: rewrite)
                for j, ent in enumerate(ent_batch):
                    assert hasattr(ent, "sents")
                    sents = list(ent.sents)
                    sent_indices = (
                        sentences.index(sents[0]),
                        sentences.index(sents[-1]),
                    )
                    assert sent_indices[1] >= sent_indices[0] >= 0

                    if self.incl_context:
                        # get n_neighbour sentences, clipped to the length of the document
                        start_sentence = max(0, sent_indices[0] - self.n_sents)
                        end_sentence = min(
                            len(sentences) - 1, sent_indices[1] + self.n_sents
                        )
                        start_token = sentences[start_sentence].start
                        end_token = sentences[end_sentence].end
                        sent_doc = doc[start_token:end_token].as_doc()

                        # currently, the context is the same for each entity in a sentence (should be refined)
                        sentence_encoding = self.model.predict([sent_doc])[0]
                        sentence_encoding_t = sentence_encoding.T
                        sentence_norm = xp.linalg.norm(sentence_encoding_t)
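
                    # Worked example of the context window above (hypothetical
                    # numbers, not from the original file): with n_sents = 2
                    # and an entity inside sentence 5 of an 8-sentence doc,
                    # sent_indices == (5, 5), so the window is clipped to
                    # sentences 3..7 and the encoded context doc is
                    #
                    #     doc[sentences[3].start : sentences[7].end].as_doc()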
                    entity_count += 1
                    if ent.label_ in self.labels_discard:
                        # ignoring this entity - setting to NIL
                        final_kb_ids.append(self.NIL)
                    else:
candidates = list(batch_candidates[j])
|
|
|
|
if not candidates:
|
|
|
|
# no prediction possible for this entity - setting to NIL
|
|
|
|
final_kb_ids.append(self.NIL)
|
|
|
|
elif len(candidates) == 1 and self.threshold is None:
|
|
|
|
# shortcut for efficiency reasons: take the 1 candidate
|
|
|
|
final_kb_ids.append(candidates[0].entity_)
|
|
|
|
else:
|
|
|
|
random.shuffle(candidates)
|
|
|
|
# set all prior probabilities to 0 if incl_prior=False
|
|
|
|
prior_probs = xp.asarray([c.prior_prob for c in candidates])
|
|
|
|
if not self.incl_prior:
|
|
|
|
prior_probs = xp.asarray([0.0 for _ in candidates])
|
|
|
|
scores = prior_probs
|
|
|
|
# add in similarity from the context
|
|
|
|
if self.incl_context:
|
|
|
|
entity_encodings = xp.asarray(
|
|
|
|
[c.entity_vector for c in candidates]
|
|
|
|
)
|
|
|
|
entity_norm = xp.linalg.norm(entity_encodings, axis=1)
|
|
|
|
if len(entity_encodings) != len(prior_probs):
|
|
|
|
raise RuntimeError(
|
|
|
|
Errors.E147.format(
|
|
|
|
method="predict",
|
|
|
|
msg="vectors not of equal length",
|
|
|
|
)
|
2021-04-12 08:08:01 +00:00
|
|
|
)
                                # cosine similarity
                                sims = xp.dot(entity_encodings, sentence_encoding_t) / (
                                    sentence_norm * entity_norm
                                )
                                if sims.shape != prior_probs.shape:
                                    raise ValueError(Errors.E161)
                                scores = prior_probs + sims - (prior_probs * sims)
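                                # Priors and similarities are combined as a
                                # probabilistic OR: p + s - p * s. E.g. a prior
                                # of 0.6 and a similarity of 0.5 yield
                                # 0.6 + 0.5 - 0.3 = 0.8; the score increases
                                # monotonically with either signal and stays
                                # within [0, 1] for inputs in [0, 1].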
                            final_kb_ids.append(
                                candidates[scores.argmax().item()].entity_
                                if self.threshold is None
                                or scores.max() >= self.threshold
                                else EntityLinker.NIL
                            )

        if len(final_kb_ids) != entity_count:
            err = Errors.E147.format(
                method="predict", msg="result variables not of equal length"
            )
            raise RuntimeError(err)
        return final_kb_ids

    def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None:
        """Modify a batch of documents, using pre-computed scores.

        docs (Iterable[Doc]): The documents to modify.
        kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict.

        DOCS: https://spacy.io/api/entitylinker#set_annotations
        """
        count_ents = len([ent for doc in docs for ent in doc.ents])
        if count_ents != len(kb_ids):
            raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids)))
        i = 0
        overwrite = self.cfg["overwrite"]
        for doc in docs:
            for ent in doc.ents:
                kb_id = kb_ids[i]
                i += 1
                for token in ent:
                    if token.ent_kb_id == 0 or overwrite:
                        token.ent_kb_id_ = kb_id
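
    # Usage sketch (illustrative only; assumes `nlp` is a pipeline with a
    # trained "entity_linker" component and `docs` is a list of parsed Docs
    # with entities):
    #
    #     linker = nlp.get_pipe("entity_linker")
    #     kb_ids = linker.predict(docs)
    #     linker.set_annotations(docs, kb_ids)
    #     for ent in docs[0].ents:
    #         print(ent.text, ent.kb_id_)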

    def to_bytes(self, *, exclude=tuple()):
        """Serialize the pipe to a bytestring.

        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (bytes): The serialized object.

        DOCS: https://spacy.io/api/entitylinker#to_bytes
        """
        self._validate_serialization_attrs()
        serialize = {}
        if hasattr(self, "cfg") and self.cfg is not None:
            serialize["cfg"] = lambda: srsly.json_dumps(self.cfg)
        serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude)
        serialize["kb"] = self.kb.to_bytes
        serialize["model"] = self.model.to_bytes
        return util.to_bytes(serialize, exclude)

    def from_bytes(self, bytes_data, *, exclude=tuple()):
        """Load the pipe from a bytestring.

        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (TrainablePipe): The loaded object.

        DOCS: https://spacy.io/api/entitylinker#from_bytes
        """
        self._validate_serialization_attrs()

        def load_model(b):
            try:
                self.model.from_bytes(b)
            except AttributeError:
                raise ValueError(Errors.E149) from None

        deserialize = {}
        if hasattr(self, "cfg") and self.cfg is not None:
            deserialize["cfg"] = lambda b: self.cfg.update(srsly.json_loads(b))
        deserialize["vocab"] = lambda b: self.vocab.from_bytes(b, exclude=exclude)
        deserialize["kb"] = lambda b: self.kb.from_bytes(b)
        deserialize["model"] = load_model
        util.from_bytes(bytes_data, deserialize, exclude)
        return self
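
    # Round-trip sketch (illustrative; assumes `linker` and `linker2` are
    # compatibly configured EntityLinker instances):
    #
    #     data = linker.to_bytes()
    #     linker2.from_bytes(data)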

    def to_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> None:
        """Serialize the pipe to disk.

        path (str / Path): Path to a directory.
        exclude (Iterable[str]): String names of serialization fields to exclude.

        DOCS: https://spacy.io/api/entitylinker#to_disk
        """
        serialize = {}
        serialize["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
        serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg)
        serialize["kb"] = lambda p: self.kb.to_disk(p)
        serialize["model"] = lambda p: self.model.to_disk(p)
        util.to_disk(path, serialize, exclude)

    def from_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> "EntityLinker":
        """Load the pipe from disk. Modifies the object in place and returns it.

        path (str / Path): Path to a directory.
        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (EntityLinker): The modified EntityLinker object.

        DOCS: https://spacy.io/api/entitylinker#from_disk
        """

        def load_model(p):
            try:
                with p.open("rb") as infile:
                    self.model.from_bytes(infile.read())
            except AttributeError:
                raise ValueError(Errors.E149) from None

        deserialize: Dict[str, Callable[[Any], Any]] = {}
        deserialize["cfg"] = lambda p: self.cfg.update(deserialize_config(p))
        deserialize["vocab"] = lambda p: self.vocab.from_disk(p, exclude=exclude)
        deserialize["kb"] = lambda p: self.kb.from_disk(p)
        deserialize["model"] = load_model
        util.from_disk(path, deserialize, exclude)
        return self
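
    # Disk round-trip sketch (illustrative; the path is arbitrary and must be
    # a directory):
    #
    #     linker.to_disk("/tmp/entity_linker")
    #     linker.from_disk("/tmp/entity_linker")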

    def rehearse(self, examples, *, sgd=None, losses=None, **config):
        raise NotImplementedError

    def add_label(self, label):
        raise NotImplementedError