2017-10-09 01:42:35 +00:00
|
|
|
import pytest
|
2020-07-06 11:06:25 +00:00
|
|
|
from thinc.api import Adam, fix_random_seed
|
2020-07-22 11:42:59 +00:00
|
|
|
from spacy import registry
|
2021-01-27 01:52:29 +00:00
|
|
|
from spacy.language import Language
|
2018-07-24 21:38:44 +00:00
|
|
|
from spacy.attrs import NORM
|
|
|
|
from spacy.vocab import Vocab
|
2020-09-09 08:31:03 +00:00
|
|
|
from spacy.training import Example
|
2018-07-24 21:38:44 +00:00
|
|
|
from spacy.tokens import Doc
|
2019-03-23 11:35:29 +00:00
|
|
|
from spacy.pipeline import DependencyParser, EntityRecognizer
|
2020-07-22 11:42:59 +00:00
|
|
|
from spacy.pipeline.ner import DEFAULT_NER_MODEL
|
|
|
|
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
|
2017-10-09 01:42:35 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def vocab():
    """Provide a Vocab whose NORM lexical attribute is the identity function."""
    getters = {NORM: lambda string: string}
    return Vocab(lex_attr_getters=getters)
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def parser(vocab):
    """Build a fresh DependencyParser from the default parser model config."""
    config = {"model": DEFAULT_PARSER_MODEL}
    resolved = registry.resolve(config, validate=True)
    return DependencyParser(vocab, resolved["model"])
|
|
|
|
|
|
|
|
|
|
|
|
def test_init_parser(parser):
    # Smoke test: requesting the fixture must construct a parser without error.
    pass
|
|
|
|
|
|
|
|
|
|
|
|
def _train_parser(parser):
    """Initialize *parser* with the "left" label and run five update steps
    on a fixed four-token example. Returns the trained parser.
    """
    fix_random_seed(1)
    parser.add_label("left")
    parser.initialize(lambda: [_parser_example(parser)])
    optimizer = Adam(0.001)
    for _ in range(5):
        doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
        annots = {"heads": [1, 1, 3, 3], "deps": ["left", "ROOT", "left", "ROOT"]}
        example = Example.from_dict(doc, annots)
        # Losses are collected per step but not inspected by callers.
        parser.update([example], sgd=optimizer, losses={})
    return parser
|
|
|
|
|
2018-07-24 21:38:44 +00:00
|
|
|
|
2020-09-08 20:44:25 +00:00
|
|
|
def _parser_example(parser):
    """Build one four-token Example with a fixed gold dependency parse."""
    words = ["a", "b", "c", "d"]
    annots = {"heads": [1, 1, 3, 3], "deps": ["right", "ROOT", "left", "ROOT"]}
    return Example.from_dict(Doc(parser.vocab, words=words), annots)
|
|
|
|
|
|
|
|
|
|
|
|
def _ner_example(ner):
    """Build one Example with two gold entity spans (a PERSON and a LOC)."""
    words = ["Joe", "loves", "visiting", "London", "during", "the", "weekend"]
    doc = Doc(ner.vocab, words=words)
    annots = {"entities": [(0, 3, "PERSON"), (19, 25, "LOC")]}
    return Example.from_dict(doc, annots)
|
|
|
|
|
|
|
|
|
2017-10-09 01:42:35 +00:00
|
|
|
def test_add_label(parser):
    """A label added after initial training should become predictable
    once the parser is updated on examples that use it.
    """
    parser = _train_parser(parser)
    parser.add_label("right")
    optimizer = Adam(0.001)
    for _ in range(100):
        parser.update([_parser_example(parser)], sgd=optimizer, losses={})
    doc = parser(Doc(parser.vocab, words=["a", "b", "c", "d"]))
    assert doc[0].dep_ == "right"
    assert doc[2].dep_ == "left"
|
2019-03-23 11:35:29 +00:00
|
|
|
|
|
|
|
|
|
|
|
def test_add_label_deserializes_correctly():
    """Labels added before initialization must survive a to_bytes/from_bytes
    round-trip into a second, freshly constructed pipe.
    """
    model = registry.resolve({"model": DEFAULT_NER_MODEL}, validate=True)["model"]
    ner1 = EntityRecognizer(Vocab(), model)
    for label in ("C", "B", "A"):
        ner1.add_label(label)
    ner1.initialize(lambda: [_ner_example(ner1)])
    ner2 = EntityRecognizer(Vocab(), model)
    # the second model needs to be resized before we can call from_bytes
    ner2.model.attrs["resize_output"](ner2.model, ner1.moves.n_moves)
    ner2.from_bytes(ner1.to_bytes())
    assert ner1.moves.n_moves == ner2.moves.n_moves
    for move in range(ner1.moves.n_moves):
        assert ner1.moves.get_class_name(move) == ner2.moves.get_class_name(move)
|
2019-09-11 16:29:35 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "pipe_cls,n_moves,model_config",
    [
        (DependencyParser, 5, DEFAULT_PARSER_MODEL),
        (EntityRecognizer, 4, DEFAULT_NER_MODEL),
    ],
)
def test_add_label_get_label(pipe_cls, n_moves, model_config):
    """Test that added labels are returned correctly. This test was added to
    test for a bug in DependencyParser.labels that'd cause it to fail when
    splitting the move names.
    """
    labels = ["A", "B", "C"]
    model = registry.resolve({"model": model_config}, validate=True)["model"]
    pipe = pipe_cls(Vocab(), model)
    for label in labels:
        pipe.add_label(label)
    # Each label expands into n_moves transition actions.
    assert len(pipe.move_names) == len(labels) * n_moves
    assert sorted(pipe.labels) == labels
|
2021-01-27 01:52:29 +00:00
|
|
|
|
|
|
|
|
|
|
|
def test_ner_labels_added_implicitly_on_predict():
    """Calling the NER pipe on a doc carrying an unseen entity label should
    register that label on the pipe."""
    nlp = Language()
    ner = nlp.add_pipe("ner")
    for label in ("A", "B", "C"):
        ner.add_label(label)
    nlp.initialize()
    doc = Doc(nlp.vocab, words=["hello", "world"], ents=["B-D", "O"])
    ner(doc)
    assert [token.ent_type_ for token in doc] == ["D", ""]
    assert "D" in ner.labels
|
|
|
|
|
|
|
|
|
2021-01-27 12:39:14 +00:00
|
|
|
def test_ner_labels_added_implicitly_on_beam_parse():
    """beam_parse on a doc carrying an unseen entity label should register
    that label on the beam NER pipe."""
    nlp = Language()
    ner = nlp.add_pipe("beam_ner")
    for label in ("A", "B", "C"):
        ner.add_label(label)
    nlp.initialize()
    doc = Doc(nlp.vocab, words=["hello", "world"], ents=["B-D", "O"])
    ner.beam_parse([doc], beam_width=32)
    assert "D" in ner.labels
|
|
|
|
|
|
|
|
|
|
|
|
def test_ner_labels_added_implicitly_on_greedy_parse():
    """greedy_parse (on the beam NER pipe, exercising its greedy path) with a
    doc carrying an unseen entity label should register that label."""
    nlp = Language()
    ner = nlp.add_pipe("beam_ner")
    for label in ("A", "B", "C"):
        ner.add_label(label)
    nlp.initialize()
    doc = Doc(nlp.vocab, words=["hello", "world"], ents=["B-D", "O"])
    ner.greedy_parse([doc])
    assert "D" in ner.labels
|
|
|
|
|
|
|
|
|
2021-01-27 01:52:29 +00:00
|
|
|
def test_ner_labels_added_implicitly_on_update():
    """nlp.update with a gold doc carrying an unseen entity label should
    register that label on the NER pipe."""
    nlp = Language()
    ner = nlp.add_pipe("ner")
    for label in ("A", "B", "C"):
        ner.add_label(label)
    nlp.initialize()
    reference = Doc(nlp.vocab, words=["hello", "world"], ents=["B-D", "O"])
    example = Example(nlp.make_doc(reference.text), reference)
    assert "D" not in ner.labels
    nlp.update([example])
    assert "D" in ner.labels
|