import numpy
import pytest
import srsly

from spacy.attrs import NORM
from spacy.lang.en import English
from spacy.strings import StringStore
from spacy.tokens import Doc
from spacy.vocab import Vocab


@pytest.mark.parametrize("text1,text2", [("hello", "bye")])
def test_pickle_string_store(text1, text2):
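    # Round-trip a StringStore through pickle: the stored hashes and the
    # number of entries should be identical after loading.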
    stringstore = StringStore()
    store1 = stringstore[text1]
    store2 = stringstore[text2]
    # protocol=-1 selects the highest pickle protocol available
    data = srsly.pickle_dumps(stringstore, protocol=-1)
    unpickled = srsly.pickle_loads(data)
    assert unpickled[text1] == store1
    assert unpickled[text2] == store2
    assert len(stringstore) == len(unpickled)


@pytest.mark.parametrize("text1,text2", [("dog", "cat")])
def test_pickle_vocab(text1, text2):
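    # Pickle a Vocab carrying a custom NORM getter, a noun-chunks iterator
    # and one vector entry; all three should survive the round-trip.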
    vocab = Vocab(
        lex_attr_getters={int(NORM): lambda string: string[:-1]},
        get_noun_chunks=English.Defaults.syntax_iterators.get("noun_chunks"),
    )
    vocab.set_vector("dog", numpy.ones((5,), dtype="f"))
    lex1 = vocab[text1]
    lex2 = vocab[text2]
    assert lex1.norm_ == text1[:-1]
    assert lex2.norm_ == text2[:-1]
    data = srsly.pickle_dumps(vocab)
    unpickled = srsly.pickle_loads(data)
    assert unpickled[text1].orth == lex1.orth
    assert unpickled[text2].orth == lex2.orth
    assert unpickled[text1].norm == lex1.norm
    assert unpickled[text2].norm == lex2.norm
    assert unpickled[text1].norm != unpickled[text2].norm
    assert unpickled.vectors is not None
    assert unpickled.get_noun_chunks is not None
    assert list(vocab["dog"].vector) == [1.0, 1.0, 1.0, 1.0, 1.0]


def test_pickle_doc(en_vocab):
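    # Pickle a Doc built with words, dependency labels and heads; the
    # token attributes should be identical after loading.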
    words = ["a", "b", "c"]
    deps = ["dep"] * len(words)
    heads = [0] * len(words)
    doc = Doc(
        en_vocab,
        words=words,
        deps=deps,
        heads=heads,
    )
    data = srsly.pickle_dumps(doc)
    unpickled = srsly.pickle_loads(data)
    assert [t.text for t in unpickled] == words
    assert [t.dep_ for t in unpickled] == deps
    assert [t.head.i for t in unpickled] == heads
    assert list(doc.noun_chunks) == []