# coding: utf-8
from __future__ import unicode_literals

from spacy.attrs import ORTH, SHAPE, POS, DEP

from ..util import get_doc


def test_doc_array_attr_of_token(en_tokenizer, en_vocab):
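    # Exporting with a tuple of attribute IDs gives one column per attribute,
    # so the ORTH and SHAPE columns should hold different values for a token.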
    text = "An example sentence"
    tokens = en_tokenizer(text)
    example = tokens.vocab["example"]
    assert example.orth != example.shape
    feats_array = tokens.to_array((ORTH, SHAPE))
    assert feats_array[0][0] != feats_array[0][1]


def test_doc_stringy_array_attr_of_token(en_tokenizer, en_vocab):
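    # Attributes can also be given as string names; the resulting array
    # should match the one produced from the integer attribute IDs.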
    text = "An example sentence"
    tokens = en_tokenizer(text)
    example = tokens.vocab["example"]
    assert example.orth != example.shape
    feats_array = tokens.to_array((ORTH, SHAPE))
    feats_array_stringy = tokens.to_array(("ORTH", "SHAPE"))
    assert feats_array_stringy[0][0] == feats_array[0][0]
    assert feats_array_stringy[0][1] == feats_array[0][1]


def test_doc_scalar_attr_of_token(en_tokenizer, en_vocab):
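    # Passing a single attribute instead of a sequence should return a
    # one-dimensional array with one value per token (three tokens here).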
    text = "An example sentence"
    tokens = en_tokenizer(text)
    example = tokens.vocab["example"]
    assert example.orth != example.shape
    feats_array = tokens.to_array(ORTH)
    assert feats_array.shape == (3,)


def test_doc_array_tag(en_tokenizer):
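    # The POS column of the exported array should mirror each token's .pos ID.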
    text = "A nice sentence."
    pos = ['DET', 'ADJ', 'NOUN', 'PUNCT']
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], pos=pos)
    assert doc[0].pos != doc[1].pos != doc[2].pos != doc[3].pos
    feats_array = doc.to_array((ORTH, POS))
    assert feats_array[0][1] == doc[0].pos
    assert feats_array[1][1] == doc[1].pos
    assert feats_array[2][1] == doc[2].pos
    assert feats_array[3][1] == doc[3].pos


def test_doc_array_dep(en_tokenizer):
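    # The DEP column of the exported array should mirror each token's .dep ID.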
    text = "A nice sentence."
    deps = ['det', 'amod', 'ROOT', 'punct']
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], deps=deps)
    feats_array = doc.to_array((ORTH, DEP))
    assert feats_array[0][1] == doc[0].dep
    assert feats_array[1][1] == doc[1].dep
    assert feats_array[2][1] == doc[2].dep
    assert feats_array[3][1] == doc[3].dep