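"""Tests for spaCy's Tok2Vec layer and the tok2vec pipeline component,
including the Tok2VecListener wiring used by downstream components."""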
import pytest
from spacy.ml.models.tok2vec import build_Tok2Vec_model
from spacy.ml.models.tok2vec import MultiHashEmbed, CharacterEmbed
from spacy.ml.models.tok2vec import MishWindowEncoder, MaxoutWindowEncoder
from spacy.pipeline.tok2vec import Tok2Vec, Tok2VecListener
from spacy.vocab import Vocab
from spacy.tokens import Doc
from spacy.training import Example
from spacy import util
from spacy.lang.en import English
from ..util import get_batch
from thinc.api import Config
from numpy.testing import assert_equal


def test_empty_doc():
    # An empty Doc should produce an empty (0, width) output without errors
    width = 128
    embed_size = 2000
    vocab = Vocab()
    doc = Doc(vocab, words=[])
    tok2vec = build_Tok2Vec_model(
        MultiHashEmbed(
            width=width,
            rows=embed_size,
            include_static_vectors=False,
            attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
        ),
        MaxoutWindowEncoder(width=width, depth=4, window_size=1, maxout_pieces=3),
    )
    tok2vec.initialize()
    vectors, backprop = tok2vec.begin_update([doc])
    assert len(vectors) == 1
    assert vectors[0].shape == (0, width)


@pytest.mark.parametrize(
    "batch_size,width,embed_size", [[1, 128, 2000], [2, 128, 2000], [3, 8, 63]]
)
def test_tok2vec_batch_sizes(batch_size, width, embed_size):
    # Every doc in the batch should get a vector of shape (n_tokens, width)
    batch = get_batch(batch_size)
    tok2vec = build_Tok2Vec_model(
        MultiHashEmbed(
            width=width,
            rows=embed_size,
            include_static_vectors=False,
            attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
        ),
        MaxoutWindowEncoder(width=width, depth=4, window_size=1, maxout_pieces=3),
    )
    tok2vec.initialize()
    vectors, backprop = tok2vec.begin_update(batch)
    assert len(vectors) == len(batch)
    for doc_vec, doc in zip(vectors, batch):
        assert doc_vec.shape == (len(doc), width)


# fmt: off
@pytest.mark.parametrize(
    "width,embed_arch,embed_config,encode_arch,encode_config",
    [
        (8, MultiHashEmbed, {"rows": 100, "attrs": ["SHAPE", "LOWER"], "include_static_vectors": False}, MaxoutWindowEncoder, {"window_size": 1, "maxout_pieces": 3, "depth": 2}),
        (8, MultiHashEmbed, {"rows": 100, "attrs": {"ORTH": 1.0, "PREFIX": 0.2}, "include_static_vectors": False}, MishWindowEncoder, {"window_size": 1, "depth": 6}),
        (8, CharacterEmbed, {"rows": 100, "nM": 64, "nC": 8, "also_use_static_vectors": False}, MaxoutWindowEncoder, {"window_size": 1, "maxout_pieces": 3, "depth": 3}),
        (8, CharacterEmbed, {"rows": 100, "nM": 16, "nC": 2, "also_use_static_vectors": False}, MishWindowEncoder, {"window_size": 1, "depth": 3}),
    ],
)
# fmt: on
def test_tok2vec_configs(width, embed_arch, embed_config, encode_arch, encode_config):
    # Each embed/encode pair should build, initialize, and run forward/backward
    embed_config["width"] = width
    encode_config["width"] = width
    docs = get_batch(3)
    tok2vec = build_Tok2Vec_model(
        embed_arch(**embed_config), encode_arch(**encode_config)
    )
    tok2vec.initialize(docs)
    vectors, backprop = tok2vec.begin_update(docs)
    assert len(vectors) == len(docs)
    assert vectors[0].shape == (len(docs[0]), width)
    backprop(vectors)


def test_init_tok2vec():
    # Simple test to initialize the default tok2vec
    nlp = English()
    tok2vec = nlp.add_pipe("tok2vec")
    assert tok2vec.listeners == []
    nlp.initialize()
    # Initialization should infer the missing output dimension
    assert tok2vec.model.get_dim("nO")
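

# Illustrative addition (not in the original suite): after initialization, the
# tok2vec component writes one vector per token to doc.tensor when the pipeline
# runs, which is the output downstream listeners consume.
def test_tok2vec_sets_doc_tensor_sketch():
    nlp = English()
    nlp.add_pipe("tok2vec")
    nlp.initialize()
    doc = nlp("This is a short test sentence")
    # One row per token in the document
    assert doc.tensor.shape[0] == len(doc)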


cfg_string = """
    [nlp]
    lang = "en"
    pipeline = ["tok2vec","tagger"]

    [components]

    [components.tagger]
    factory = "tagger"

    [components.tagger.model]
    @architectures = "spacy.Tagger.v1"
    nO = null

    [components.tagger.model.tok2vec]
    @architectures = "spacy.Tok2VecListener.v1"
    width = ${components.tok2vec.model.encode.width}

    [components.tok2vec]
    factory = "tok2vec"

    [components.tok2vec.model]
    @architectures = "spacy.Tok2Vec.v1"

    [components.tok2vec.model.embed]
    @architectures = "spacy.MultiHashEmbed.v1"
    width = ${components.tok2vec.model.encode.width}
    rows = 2000
    include_static_vectors = false
    attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]

    [components.tok2vec.model.encode]
    @architectures = "spacy.MaxoutWindowEncoder.v1"
    width = 96
    depth = 4
    window_size = 1
    maxout_pieces = 3
    """


TRAIN_DATA = [
    ("I like green eggs", {"tags": ["N", "V", "J", "N"]}),
    ("Eat blue ham", {"tags": ["V", "J", "N"]}),
]


def test_tok2vec_listener():
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp.pipe_names == ["tok2vec", "tagger"]
    tagger = nlp.get_pipe("tagger")
    tok2vec = nlp.get_pipe("tok2vec")
    tagger_tok2vec = tagger.model.get_ref("tok2vec")
    assert isinstance(tok2vec, Tok2Vec)
    assert isinstance(tagger_tok2vec, Tok2VecListener)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
        for tag in t[1]["tags"]:
            tagger.add_label(tag)

    # Check that the Tok2Vec component finds its listeners
    assert tok2vec.listeners == []
    optimizer = nlp.initialize(lambda: train_examples)
    assert tok2vec.listeners == [tagger_tok2vec]

    for i in range(5):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)

    doc = nlp("Running the pipeline as a whole.")
    doc_tensor = tagger_tok2vec.predict([doc])[0]
    # The listener should return exactly the tensor the tok2vec component
    # stored on the doc
    assert_equal(doc.tensor, doc_tensor)

    # TODO: should this warn or error?
    nlp.select_pipes(disable="tok2vec")
    assert nlp.pipe_names == ["tagger"]
    nlp("Running the pipeline with the Tok2Vec component disabled.")


def test_tok2vec_listener_callback():
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp.pipe_names == ["tok2vec", "tagger"]
    tagger = nlp.get_pipe("tagger")
    tok2vec = nlp.get_pipe("tok2vec")
    nlp._link_components()
    docs = [nlp.make_doc("A random sentence")]
    tok2vec.model.initialize(X=docs)
    gold_array = [[1.0 for tag in ["V", "Z"]] for word in docs]
    label_sample = [tagger.model.ops.asarray(gold_array, dtype="float32")]
    tagger.model.initialize(X=docs, Y=label_sample)
    docs = [nlp.make_doc("Another entirely random sentence")]
    # Updating the tok2vec component passes a fresh batch (and its backprop
    # callback) to the listener before the tagger runs
    tok2vec.update([Example.from_dict(x, {}) for x in docs])
    Y, get_dX = tagger.model.begin_update(docs)
    # Ensure that the backprop call works (and doesn't hit a 'None' callback)
    assert get_dX(Y) is not None