# coding: utf-8
from __future__ import unicode_literals

import numpy
import tempfile
import shutil
import contextlib
import srsly
from pathlib import Path

from spacy.tokens import Doc, Span
from spacy.attrs import POS, HEAD, DEP
from spacy.compat import path2str


@contextlib.contextmanager
def make_tempfile(mode="r"):
    """Create a temporary file, yield it and close it after use."""
    f = tempfile.TemporaryFile(mode=mode)
    yield f
    f.close()
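

# Usage sketch for make_tempfile (illustrative, not part of the original
# module): the file handle is closed when the with-block exits without error.
def _example_make_tempfile():
    with make_tempfile(mode="w+") as f:
        f.write("hello")
        f.seek(0)
        assert f.read() == "hello"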


@contextlib.contextmanager
def make_tempdir():
    """Create a temporary directory, yield its path and remove it after use."""
    d = Path(tempfile.mkdtemp())
    yield d
    shutil.rmtree(path2str(d))
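

# Usage sketch for make_tempdir (illustrative): files written under the
# yielded path are cleaned up when the with-block exits without error.
def _example_make_tempdir():
    with make_tempdir() as d:
        with (d / "example.txt").open("w", encoding="utf8") as f:  # hypothetical file
            f.write("data")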


def get_doc(vocab, words=[], pos=None, heads=None, deps=None, tags=None, ents=None):
    """Create Doc object from given vocab, words and annotations."""
    pos = pos or [""] * len(words)
    tags = tags or [""] * len(words)
    heads = heads or [0] * len(words)
    deps = deps or [""] * len(words)
    for value in deps + tags + pos:
        vocab.strings.add(value)

    doc = Doc(vocab, words=words)
    attrs = doc.to_array([POS, HEAD, DEP])
    for i, (p, head, dep) in enumerate(zip(pos, heads, deps)):
        attrs[i, 0] = doc.vocab.strings[p]
        attrs[i, 1] = head
        attrs[i, 2] = doc.vocab.strings[dep]
    doc.from_array([POS, HEAD, DEP], attrs)
    if ents:
        doc.ents = [
            Span(doc, start, end, label=doc.vocab.strings[label])
            for start, end, label in ents
        ]
    if tags:
        for token in doc:
            token.tag_ = tags[token.i]
    return doc
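

# A minimal sketch of calling get_doc (values are hypothetical). Heads are
# token-relative offsets, as Doc.from_array expects for HEAD: 1 means "head is
# the next token", 0 means the token is its own head (the root).
def _example_get_doc():
    from spacy.vocab import Vocab
    doc = get_doc(
        Vocab(),
        words=["Hello", "world"],
        heads=[1, 0],
        deps=["intj", "ROOT"],
        pos=["INTJ", "NOUN"],
    )
    assert doc[0].head.text == "world"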


def apply_transition_sequence(parser, doc, sequence):
    """Perform a series of pre-specified transitions, to put the parser in a
    desired state."""
    for action_name in sequence:
        if "-" in action_name:
            move, label = action_name.split("-")
            parser.add_label(label)
    with parser.step_through(doc) as stepwise:
        for transition in sequence:
            stepwise.transition(transition)
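

# Hypothetical usage sketch (not runnable without a loaded pipeline):
# transition names follow the "MOVE-LABEL" convention, e.g. "L-nsubj" or
# "R-dobj", which is why any label after the "-" is registered with the
# parser before stepping through the doc:
#
#     parser = nlp.get_pipe("parser")
#     apply_transition_sequence(parser, doc, ["S", "L-nsubj", "R-dobj", "D"])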


def add_vecs_to_vocab(vocab, vectors):
    """Add list of vector tuples to given vocab. All vectors need to have the
    same length. Format: [("text", [1, 2, 3])]"""
    length = len(vectors[0][1])
    vocab.reset_vectors(width=length)
    for word, vec in vectors:
        vocab.set_vector(word, vector=vec)
    return vocab
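

# Usage sketch (hypothetical vectors): the first entry determines the width
# of the vector table, so all vectors must have the same length.
def _example_add_vecs_to_vocab():
    from spacy.vocab import Vocab
    vocab = add_vecs_to_vocab(Vocab(), [("apple", [1, 2, 3]), ("orange", [-1, -2, -3])])
    assert vocab.get_vector("apple").shape == (3,)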


def get_cosine(vec1, vec2):
    """Get cosine similarity of two given vectors."""
    return numpy.dot(vec1, vec2) / (numpy.linalg.norm(vec1) * numpy.linalg.norm(vec2))
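

# Quick numeric check (illustrative): vectors pointing the same way have
# cosine 1.0. Note the division is undefined if either vector is all zeros.
def _example_get_cosine():
    assert numpy.isclose(get_cosine(numpy.asarray([1.0, 1.0]), numpy.asarray([2.0, 2.0])), 1.0)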


def assert_docs_equal(doc1, doc2):
    """Compare two Doc objects and assert that they're equal. Tests for tokens,
    tags, dependencies and entities."""
    assert [t.orth for t in doc1] == [t.orth for t in doc2]

    assert [t.pos for t in doc1] == [t.pos for t in doc2]
    assert [t.tag for t in doc1] == [t.tag for t in doc2]

    assert [t.head.i for t in doc1] == [t.head.i for t in doc2]
    assert [t.dep for t in doc1] == [t.dep for t in doc2]
    if doc1.is_parsed and doc2.is_parsed:
        assert [s for s in doc1.sents] == [s for s in doc2.sents]

    assert [t.ent_type for t in doc1] == [t.ent_type for t in doc2]
    assert [t.ent_iob for t in doc1] == [t.ent_iob for t in doc2]
    assert [ent for ent in doc1.ents] == [ent for ent in doc2.ents]
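

# Usage sketch: handy for checking that a Doc survives a serialization
# round-trip unchanged, e.g.:
#
#     doc2 = Doc(doc1.vocab).from_bytes(doc1.to_bytes())
#     assert_docs_equal(doc1, doc2)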


def assert_packed_msg_equal(b1, b2):
    """Assert that two packed msgpack messages are equal."""
    msg1 = srsly.msgpack_loads(b1)
    msg2 = srsly.msgpack_loads(b2)
    assert sorted(msg1.keys()) == sorted(msg2.keys())
    for (k1, v1), (k2, v2) in zip(sorted(msg1.items()), sorted(msg2.items())):
        assert k1 == k2
        assert v1 == v2
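

# Usage sketch: key order in the packed messages must not matter, only their
# content, e.g.:
#
#     b1 = srsly.msgpack_dumps({"a": 1, "b": 2})
#     b2 = srsly.msgpack_dumps({"b": 2, "a": 1})
#     assert_packed_msg_equal(b1, b2)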