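"""Tests for spaCy's binary serialization: Packer pack/unpack round-trips
and Doc.to_bytes() / Doc.from_bytes()."""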
from __future__ import unicode_literals

import os
import re

import numpy
import pytest

from spacy.en import English
from spacy.tokens.doc import Doc
from spacy.tokenizer import Tokenizer
from spacy import util
from spacy.attrs import TAG, DEP, HEAD
from spacy.serialize.packer import Packer
from spacy.serialize.bits import BitArray


@pytest.fixture
def vocab():
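    # Use the vocab from $SPACY_DATA if the env var is set; otherwise fall
    # back to the installed default data package.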
    data_dir = os.environ.get('SPACY_DATA')
    if data_dir is None:
        package = util.get_package_by_name()
    else:
        package = util.get_package(data_dir)

    vocab = English.default_vocab(package=package)
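    # Look up a few lexemes up front so they are cached in the vocab.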
    lex = vocab['dog']
    assert vocab[vocab.strings['dog']].orth_ == 'dog'
    lex = vocab['the']
    lex = vocab['quick']
    lex = vocab['jumped']
    return vocab


@pytest.fixture
def tokenizer(vocab):
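    # No special-case rules; the sentinel pattern below never matches, so
    # the tokenizer splits on whitespace only.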
    null_re = re.compile(r'!!!!!!!!!')
    tokenizer = Tokenizer(vocab, {}, null_re, null_re, null_re)
    return tokenizer


def test_char_packer(vocab):
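    # The low-level character codec should reproduce a byte string exactly.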
    packer = Packer(vocab, [])
    bits = BitArray()
    bits.seek(0)

    byte_str = bytearray(b'the dog jumped')
    packer.char_codec.encode(byte_str, bits)
    bits.seek(0)
    result = [b''] * len(byte_str)
    packer.char_codec.decode(bits, result)
    assert bytearray(result) == byte_str


def test_packer_unannotated(tokenizer):
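    # An empty frequency table still lets the raw text round-trip.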
    packer = Packer(tokenizer.vocab, [])

    msg = tokenizer(u'the dog jumped')
    assert msg.string == 'the dog jumped'

    bits = packer.pack(msg)
    result = packer.unpack(bits)

    assert result.string == 'the dog jumped'


@pytest.mark.models
def test_packer_annotated(tokenizer):
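    # Seed the packer with hand-picked tag, dep and head-offset frequencies
    # (used to build the per-attribute codecs), then check that all
    # annotations survive a pack/unpack round-trip.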
    vocab = tokenizer.vocab
    nn = vocab.strings['NN']
    dt = vocab.strings['DT']
    vbd = vocab.strings['VBD']
    jj = vocab.strings['JJ']
    det = vocab.strings['det']
    nsubj = vocab.strings['nsubj']
    adj = vocab.strings['adj']
    root = vocab.strings['ROOT']

    attr_freqs = [
        (TAG, [(nn, 0.1), (dt, 0.2), (jj, 0.01), (vbd, 0.05)]),
        (DEP, {det: 0.2, nsubj: 0.1, adj: 0.05, root: 0.1}.items()),
        (HEAD, {0: 0.05, 1: 0.2, -1: 0.2, -2: 0.1, 2: 0.1}.items())
    ]

    packer = Packer(vocab, attr_freqs)

    msg = tokenizer(u'the dog jumped')
    msg.from_array(
        [TAG, DEP, HEAD],
        numpy.array([
            [dt, det, 1],
            [nn, nsubj, 1],
            [vbd, root, 0]
        ], dtype=numpy.int32))

    assert msg.string == 'the dog jumped'
    assert [t.tag_ for t in msg] == ['DT', 'NN', 'VBD']
    assert [t.dep_ for t in msg] == ['det', 'nsubj', 'ROOT']
    assert [(t.head.i - t.i) for t in msg] == [1, 1, 0]

    bits = packer.pack(msg)
    result = packer.unpack(bits)

    assert result.string == 'the dog jumped'
    assert [t.tag_ for t in result] == ['DT', 'NN', 'VBD']
    assert [t.dep_ for t in result] == ['det', 'nsubj', 'ROOT']
    assert [(t.head.i - t.i) for t in result] == [1, 1, 0]


def test_packer_bad_chars(tokenizer):
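    # Non-ASCII characters should survive the pack/unpack round-trip.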
    string = u'naja gut, is eher bl\xf6d und nicht mit reddit.com/digg.com vergleichbar; vielleicht auf dem weg dahin'
    packer = Packer(tokenizer.vocab, [])

    doc = tokenizer(string)
    bits = packer.pack(doc)
    result = packer.unpack(bits)
    assert result.string == doc.string


@pytest.mark.models
def test_packer_bad_chars_tagged(EN):
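    # Same text, but round-tripped through Doc.to_bytes() / Doc.from_bytes()
    # with full annotation; EN is assumed to be the English pipeline fixture
    # provided by the suite's conftest.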
    string = u'naja gut, is eher bl\xf6d und nicht mit reddit.com/digg.com vergleichbar; vielleicht auf dem weg dahin'
    doc = EN(string)
    byte_string = doc.to_bytes()
    result = Doc(EN.vocab).from_bytes(byte_string)
    assert [t.tag_ for t in result] == [t.tag_ for t in doc]