# coding: utf-8
from __future__ import unicode_literals

import spacy
import pytest
from spacy.lang.en import English
from spacy.tokens import Doc, DocBin
from spacy.compat import path2str

from ..util import make_tempdir


def test_serialize_empty_doc(en_vocab):
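    # An empty Doc should round-trip through to_bytes/from_bytes unchanged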
    doc = Doc(en_vocab)
    data = doc.to_bytes()
    doc2 = Doc(en_vocab)
    doc2.from_bytes(data)
    assert len(doc) == len(doc2)
    for token1, token2 in zip(doc, doc2):
        assert token1.text == token2.text


def test_serialize_doc_roundtrip_bytes(en_vocab):
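    # Serializing and deserializing should reproduce identical bytes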
    doc = Doc(en_vocab, words=["hello", "world"])
    doc_b = doc.to_bytes()
    new_doc = Doc(en_vocab).from_bytes(doc_b)
    assert new_doc.to_bytes() == doc_b


def test_serialize_doc_roundtrip_disk(en_vocab):
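    # The same round-trip should work on disk via a Path object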
    doc = Doc(en_vocab, words=["hello", "world"])
    with make_tempdir() as d:
        file_path = d / "doc"
        doc.to_disk(file_path)
        doc_d = Doc(en_vocab).from_disk(file_path)
        assert doc.to_bytes() == doc_d.to_bytes()


def test_serialize_doc_roundtrip_disk_str_path(en_vocab):
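    # Same as above, but passing a plain string path instead of a Path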
    doc = Doc(en_vocab, words=["hello", "world"])
    with make_tempdir() as d:
        file_path = d / "doc"
        file_path = path2str(file_path)
        doc.to_disk(file_path)
        doc_d = Doc(en_vocab).from_disk(file_path)
        assert doc.to_bytes() == doc_d.to_bytes()


def test_serialize_doc_exclude(en_vocab):
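    # user_data should be stored by default and skippable via exclude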
    doc = Doc(en_vocab, words=["hello", "world"])
    doc.user_data["foo"] = "bar"
    # By default, user_data survives the round-trip
    new_doc = Doc(en_vocab).from_bytes(doc.to_bytes())
    assert new_doc.user_data["foo"] == "bar"
    # user_data can be excluded when deserializing...
    new_doc = Doc(en_vocab).from_bytes(doc.to_bytes(), exclude=["user_data"])
    assert not new_doc.user_data
    # ...or when serializing
    new_doc = Doc(en_vocab).from_bytes(doc.to_bytes(exclude=["user_data"]))
    assert not new_doc.user_data
    # The old-style keyword arguments raise an error instead of being ignored
    with pytest.raises(ValueError):
        doc.to_bytes(user_data=False)
    with pytest.raises(ValueError):
        Doc(en_vocab).from_bytes(doc.to_bytes(), tensor=False)


def test_serialize_doc_bin():
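    # DocBin packs a collection of docs into a single byte string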
    doc_bin = DocBin(attrs=["LEMMA", "ENT_IOB", "ENT_TYPE"], store_user_data=True)
    texts = ["Some text", "Lots of texts...", "..."]
    nlp = English()
    for doc in nlp.pipe(texts):
        doc_bin.add(doc)
    bytes_data = doc_bin.to_bytes()

    # Deserialize later, e.g. in a new process
    nlp = spacy.blank("en")
    doc_bin = DocBin().from_bytes(bytes_data)
    list(doc_bin.get_docs(nlp.vocab))