2015-02-07 18:14:07 +00:00
import pytest
2017-01-11 17:05:36 +00:00
import numpy
2019-03-10 14:24:34 +00:00
from spacy . tokens import Doc , Span
2018-07-24 21:38:44 +00:00
from spacy . vocab import Vocab
2020-08-10 14:43:52 +00:00
from spacy . lexeme import Lexeme
2020-07-22 11:42:59 +00:00
from spacy . lang . en import English
2020-07-14 12:07:35 +00:00
from spacy . attrs import ENT_TYPE , ENT_IOB , SENT_START , HEAD , DEP , MORPH
2018-07-24 21:38:44 +00:00
from . . util import get_doc
2017-01-11 17:05:36 +00:00
2015-02-07 18:14:07 +00:00
@pytest.mark.parametrize("text", [["one", "two", "three"]])
def test_doc_api_compare_by_string_position(en_vocab, text):
    """Tokens should order themselves by document position, not object ID."""
    doc = Doc(en_vocab, words=text)
    # Get the tokens in this order, so their ID ordering doesn't match the idx
    token3 = doc[-1]
    token2 = doc[-2]
    token1 = doc[-1]
    token1, token2, token3 = doc
    assert token1 < token2 < token3
    assert not token1 > token2
    assert token2 > token1
    assert token2 <= token3
    assert token3 >= token1
def test_doc_api_getitem(en_tokenizer):
    """Indexing and slicing a Doc: negative indices, clamped bounds, sub-spans."""
    text = "Give it back! He pleaded."
    tokens = en_tokenizer(text)
    assert tokens[0].text == "Give"
    assert tokens[-1].text == "."
    # Indexing one past the end raises, like a list
    with pytest.raises(IndexError):
        tokens[len(tokens)]

    def to_str(span):
        # Render a span compactly for easy assertions
        return "/".join(token.text for token in span)

    span = tokens[1:1]
    assert not to_str(span)
    span = tokens[1:4]
    assert to_str(span) == "it/back/!"
    span = tokens[1:4:1]
    assert to_str(span) == "it/back/!"
    # Only step == 1 is supported
    with pytest.raises(ValueError):
        tokens[1:4:2]
    with pytest.raises(ValueError):
        tokens[1:4:-1]
    # Mixed negative/positive bounds
    span = tokens[-3:6]
    assert to_str(span) == "He/pleaded"
    span = tokens[4:-1]
    assert to_str(span) == "He/pleaded"
    span = tokens[-5:-3]
    assert to_str(span) == "back/!"
    # Empty slices keep a sensible start == end position
    span = tokens[5:4]
    assert span.start == span.end == 5 and not to_str(span)
    span = tokens[4:-3]
    assert span.start == span.end == 4 and not to_str(span)
    # Open-ended slices
    span = tokens[:]
    assert to_str(span) == "Give/it/back/!/He/pleaded/."
    span = tokens[4:]
    assert to_str(span) == "He/pleaded/."
    span = tokens[:4]
    assert to_str(span) == "Give/it/back/!"
    span = tokens[:-3]
    assert to_str(span) == "Give/it/back/!"
    span = tokens[-3:]
    assert to_str(span) == "He/pleaded/."
    # Out-of-range bounds are clamped rather than raising
    span = tokens[4:50]
    assert to_str(span) == "He/pleaded/."
    span = tokens[-50:4]
    assert to_str(span) == "Give/it/back/!"
    span = tokens[-50:-40]
    assert span.start == span.end == 0 and not to_str(span)
    span = tokens[40:50]
    assert span.start == span.end == 7 and not to_str(span)
    # Slicing a Span yields sub-spans with the same semantics
    span = tokens[1:4]
    assert span[0].orth_ == "it"
    subspan = span[:]
    assert to_str(subspan) == "it/back/!"
    subspan = span[:2]
    assert to_str(subspan) == "it/back"
    subspan = span[1:]
    assert to_str(subspan) == "back/!"
    subspan = span[:-1]
    assert to_str(subspan) == "it/back"
    subspan = span[-2:]
    assert to_str(subspan) == "back/!"
    subspan = span[1:2]
    assert to_str(subspan) == "back"
    subspan = span[-2:-1]
    assert to_str(subspan) == "back"
    subspan = span[-50:50]
    assert to_str(subspan) == "it/back/!"
    subspan = span[50:-50]
    assert subspan.start == subspan.end == 4 and not to_str(subspan)
@pytest.mark.parametrize(
    "text", ["Give it back! He pleaded.", "Give it back! He pleaded. "]
)
def test_doc_api_serialize(en_tokenizer, text):
    """Round-trip a Doc through bytes, with and without excluded fields."""
    tokens = en_tokenizer(text)
    # Set attributes on the first token so we can check they survive
    tokens[0].lemma_ = "lemma"
    tokens[0].norm_ = "norm"
    tokens[0].ent_kb_id_ = "ent_kb_id"
    new_tokens = Doc(tokens.vocab).from_bytes(tokens.to_bytes())
    assert tokens.text == new_tokens.text
    assert [t.text for t in tokens] == [t.text for t in new_tokens]
    assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
    assert new_tokens[0].lemma_ == "lemma"
    assert new_tokens[0].norm_ == "norm"
    assert new_tokens[0].ent_kb_id_ == "ent_kb_id"

    # Excluding the tensor must not affect the text round-trip
    new_tokens = Doc(tokens.vocab).from_bytes(
        tokens.to_bytes(exclude=["tensor"]), exclude=["tensor"]
    )
    assert tokens.text == new_tokens.text
    assert [t.text for t in tokens] == [t.text for t in new_tokens]
    assert [t.orth for t in tokens] == [t.orth for t in new_tokens]

    # Likewise for sentiment
    new_tokens = Doc(tokens.vocab).from_bytes(
        tokens.to_bytes(exclude=["sentiment"]), exclude=["sentiment"]
    )
    assert tokens.text == new_tokens.text
    assert [t.text for t in tokens] == [t.text for t in new_tokens]
    assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
def test_doc_api_set_ents(en_tokenizer):
    """Assigning doc.ents sets IOB tags and span boundaries."""
    # Intentional misspellings — the entity is a made-up product name
    text = "I use goggle chrone to surf the web"
    tokens = en_tokenizer(text)
    assert len(tokens.ents) == 0
    tokens.ents = [(tokens.vocab.strings["PRODUCT"], 2, 4)]
    assert len(list(tokens.ents)) == 1
    # 3 = B(egin), 1 = I(nside), 0 = unset
    assert [t.ent_iob for t in tokens] == [0, 0, 3, 1, 0, 0, 0, 0]
    assert tokens.ents[0].label_ == "PRODUCT"
    assert tokens.ents[0].start == 2
    assert tokens.ents[0].end == 4
def test_doc_api_sents_empty_string(en_tokenizer):
    """An empty doc yields no sentences instead of raising."""
    doc = en_tokenizer("")
    doc.is_parsed = True
    sents = list(doc.sents)
    assert len(sents) == 0
def test_doc_api_runtime_error(en_tokenizer):
    # Example that caused run-time error while parsing Reddit
    # fmt: off
    text = "67%of black households are single parent \n\n72%of all black babies born out of wedlock \n\n50%of all black kids don\u2019t finish high school"
    deps = ["nummod", "nsubj", "prep", "amod", "pobj", "ROOT", "amod", "attr", "", "nummod", "appos", "prep", "det",
            "amod", "pobj", "acl", "prep", "prep", "pobj",
            "", "nummod", "nsubj", "prep", "det", "amod", "pobj", "aux", "neg", "ccomp", "amod", "dobj"]
    # fmt: on
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], deps=deps)
    # Strip leading modifiers off each noun chunk, keeping multi-token remainders
    nps = []
    for np in doc.noun_chunks:
        while len(np) > 1 and np[0].dep_ not in ("advmod", "amod", "compound"):
            np = np[1:]
        if len(np) > 1:
            nps.append(np)
    # Merging the collected chunks must not crash the retokenizer
    with doc.retokenize() as retokenizer:
        for np in nps:
            attrs = {
                "tag": np.root.tag_,
                "lemma": np.text,
                "ent_type": np.root.ent_type_,
            }
            retokenizer.merge(np, attrs=attrs)
2017-01-11 17:05:36 +00:00
def test_doc_api_right_edge ( en_tokenizer ) :
2017-01-14 12:41:19 +00:00
""" Test for bug occurring from Unshift action, causing incorrect right edge """
2018-11-27 00:09:36 +00:00
# fmt: off
2017-01-11 17:05:36 +00:00
text = " I have proposed to myself, for the sake of such as live under the government of the Romans, to translate those books into the Greek tongue. "
heads = [ 2 , 1 , 0 , - 1 , - 1 , - 3 , 15 , 1 , - 2 , - 1 , 1 , - 3 , - 1 , - 1 , 1 , - 2 , - 1 , 1 ,
- 2 , - 7 , 1 , - 19 , 1 , - 2 , - 3 , 2 , 1 , - 3 , - 26 ]
2018-11-27 00:09:36 +00:00
# fmt: on
2017-01-11 17:05:36 +00:00
tokens = en_tokenizer ( text )
2018-07-24 21:38:44 +00:00
doc = get_doc ( tokens . vocab , words = [ t . text for t in tokens ] , heads = heads )
2018-11-27 00:09:36 +00:00
assert doc [ 6 ] . text == " for "
2017-01-11 17:05:36 +00:00
subtree = [ w . text for w in doc [ 6 ] . subtree ]
2020-07-04 14:25:34 +00:00
# fmt: off
assert subtree == [ " for " , " the " , " sake " , " of " , " such " , " as " , " live " , " under " , " the " , " government " , " of " , " the " , " Romans " , " , " ]
# fmt: on
2018-11-27 00:09:36 +00:00
assert doc [ 6 ] . right_edge . text == " , "
2017-01-11 17:05:36 +00:00
def test_doc_api_has_vector():
    """A doc reports has_vector when its vocab has a vector for a token."""
    vocab = Vocab()
    vocab.reset_vectors(width=2)
    vocab.set_vector("kitten", vector=numpy.asarray([0.0, 2.0], dtype="f"))
    doc = Doc(vocab, words=["kitten"])
    assert doc.has_vector
def test_doc_api_similarity_match():
    """Similarity is 1.0 against an identical token/lexeme, 0.0 otherwise.

    Comparing without real vectors triggers a UserWarning.
    """
    doc = Doc(Vocab(), words=["a"])
    assert doc.similarity(doc[0]) == 1.0
    assert doc.similarity(doc.vocab["a"]) == 1.0
    doc2 = Doc(doc.vocab, words=["a", "b", "c"])
    with pytest.warns(UserWarning):
        assert doc.similarity(doc2[:1]) == 1.0
        assert doc.similarity(doc2) == 0.0
@pytest.mark.parametrize(
    "sentence,heads,lca_matrix",
    [
        (
            "the lazy dog slept",
            [2, 1, 1, 0],
            numpy.array([[0, 2, 2, 3], [2, 1, 2, 3], [2, 2, 2, 3], [3, 3, 3, 3]]),
        ),
        (
            "The lazy dog slept. The quick fox jumped",
            [2, 1, 1, 0, -1, 2, 1, 1, 0],
            # -1 marks token pairs in different sentences (no common ancestor)
            numpy.array(
                [
                    [0, 2, 2, 3, 3, -1, -1, -1, -1],
                    [2, 1, 2, 3, 3, -1, -1, -1, -1],
                    [2, 2, 2, 3, 3, -1, -1, -1, -1],
                    [3, 3, 3, 3, 3, -1, -1, -1, -1],
                    [3, 3, 3, 3, 4, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1, 5, 7, 7, 8],
                    [-1, -1, -1, -1, -1, 7, 6, 7, 8],
                    [-1, -1, -1, -1, -1, 7, 7, 7, 8],
                    [-1, -1, -1, -1, -1, 8, 8, 8, 8],
                ]
            ),
        ),
    ],
)
def test_lowest_common_ancestor(en_tokenizer, sentence, heads, lca_matrix):
    """doc.get_lca_matrix() matches the hand-computed LCA table."""
    tokens = en_tokenizer(sentence)
    doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
    lca = doc.get_lca_matrix()
    assert (lca == lca_matrix).all()
    # Spot-check a few entries: a token is its own ancestor, etc.
    assert lca[1, 1] == 1
    assert lca[0, 1] == 2
    assert lca[1, 2] == 2
def test_doc_is_nered ( en_vocab ) :
words = [ " I " , " live " , " in " , " New " , " York " ]
doc = Doc ( en_vocab , words = words )
assert not doc . is_nered
doc . ents = [ Span ( doc , 3 , 5 , label = " GPE " ) ]
assert doc . is_nered
# Test creating doc from array with unknown values
arr = numpy . array ( [ [ 0 , 0 ] , [ 0 , 0 ] , [ 0 , 0 ] , [ 384 , 3 ] , [ 384 , 1 ] ] , dtype = " uint64 " )
doc = Doc ( en_vocab , words = words ) . from_array ( [ ENT_TYPE , ENT_IOB ] , arr )
assert doc . is_nered
# Test serialization
new_doc = Doc ( en_vocab ) . from_bytes ( doc . to_bytes ( ) )
assert new_doc . is_nered
2019-03-11 13:21:40 +00:00
2020-02-16 16:17:09 +00:00
def test_doc_from_array_sent_starts ( en_vocab ) :
words = [ " I " , " live " , " in " , " New " , " York " , " . " , " I " , " like " , " cats " , " . " ]
2020-09-16 18:32:38 +00:00
heads = [ 0 , - 1 , - 2 , - 3 , - 4 , - 5 , 0 , - 1 , - 2 , - 3 ]
2020-03-02 10:49:28 +00:00
# fmt: off
2020-09-16 18:32:38 +00:00
deps = [ " ROOT " , " dep " , " dep " , " dep " , " dep " , " dep " , " ROOT " , " dep " , " dep " , " dep " ]
2020-03-02 10:49:28 +00:00
# fmt: on
2020-09-16 18:32:38 +00:00
doc = get_doc ( en_vocab , words = words , heads = heads , deps = deps )
2020-02-16 16:17:09 +00:00
attrs = [ SENT_START , HEAD ]
arr = doc . to_array ( attrs )
new_doc = Doc ( en_vocab , words = words )
with pytest . raises ( ValueError ) :
new_doc . from_array ( attrs , arr )
attrs = [ SENT_START , DEP ]
arr = doc . to_array ( attrs )
new_doc = Doc ( en_vocab , words = words )
new_doc . from_array ( attrs , arr )
assert [ t . is_sent_start for t in doc ] == [ t . is_sent_start for t in new_doc ]
assert not new_doc . is_parsed
attrs = [ HEAD , DEP ]
arr = doc . to_array ( attrs )
new_doc = Doc ( en_vocab , words = words )
new_doc . from_array ( attrs , arr )
assert [ t . is_sent_start for t in doc ] == [ t . is_sent_start for t in new_doc ]
assert new_doc . is_parsed
2020-07-14 12:07:35 +00:00
def test_doc_from_array_morph(en_vocab):
    """Morphological features round-trip through to_array/from_array."""
    words = ["I", "live", "in", "New", "York", "."]
    # fmt: off
    morphs = ["Feat1=A", "Feat1=B", "Feat1=C", "Feat1=A|Feat2=D", "Feat2=E", "Feat3=F"]
    # fmt: on
    doc = Doc(en_vocab, words=words)
    for i, morph in enumerate(morphs):
        doc[i].morph_ = morph
    attrs = [MORPH]
    arr = doc.to_array(attrs)
    new_doc = Doc(en_vocab, words=words)
    new_doc.from_array(attrs, arr)
    assert [t.morph_ for t in new_doc] == morphs
    assert [t.morph_ for t in doc] == [t.morph_ for t in new_doc]
def test_doc_api_from_docs(en_tokenizer, de_tokenizer):
    """Doc.from_docs merges docs, handling empty texts, whitespace and user_data."""
    en_texts = ["Merging the docs is fun.", "", "They don't think alike."]
    en_texts_without_empty = [t for t in en_texts if len(t)]
    de_text = "Wie war die Frage?"
    en_docs = [en_tokenizer(text) for text in en_texts]
    docs_idx = en_texts[0].index("docs")
    de_doc = de_tokenizer(de_text)
    en_docs[0].user_data[("._.", "is_ambiguous", docs_idx, None)] = (
        True,
        None,
        None,
        None,
    )
    assert Doc.from_docs([]) is None
    # Merging a single doc still produces a new object with identical text
    assert de_doc is not Doc.from_docs([de_doc])
    assert str(de_doc) == str(Doc.from_docs([de_doc]))
    # Docs from different vocabs cannot be merged
    with pytest.raises(ValueError):
        Doc.from_docs(en_docs + [de_doc])

    m_doc = Doc.from_docs(en_docs)
    assert len(en_texts_without_empty) == len(list(m_doc.sents))
    assert len(str(m_doc)) > len(en_texts[0]) + len(en_texts[1])
    assert str(m_doc) == " ".join(en_texts_without_empty)
    p_token = m_doc[len(en_docs[0]) - 1]
    assert p_token.text == "." and bool(p_token.whitespace_)
    en_docs_tokens = [t for doc in en_docs for t in doc]
    assert len(m_doc) == len(en_docs_tokens)
    # +1 for the whitespace inserted between the merged docs
    think_idx = len(en_texts[0]) + 1 + en_texts[2].index("think")
    assert m_doc[9].idx == think_idx
    with pytest.raises(AttributeError):
        # not callable, because it was not set via set_extension
        m_doc[2]._.is_ambiguous
    assert len(m_doc.user_data) == len(en_docs[0].user_data)  # but it's there

    m_doc = Doc.from_docs(en_docs, ensure_whitespace=False)
    assert len(en_texts_without_empty) == len(list(m_doc.sents))
    assert len(str(m_doc)) == sum(len(t) for t in en_texts)
    assert str(m_doc) == "".join(en_texts)
    p_token = m_doc[len(en_docs[0]) - 1]
    assert p_token.text == "." and not bool(p_token.whitespace_)
    en_docs_tokens = [t for doc in en_docs for t in doc]
    assert len(m_doc) == len(en_docs_tokens)
    # No separator inserted, so no +1 offset here
    think_idx = len(en_texts[0]) + 0 + en_texts[2].index("think")
    assert m_doc[9].idx == think_idx

    m_doc = Doc.from_docs(en_docs, attrs=["lemma", "length", "pos"])
    with pytest.raises(ValueError):
        # important attributes from sentenziser or parser are missing
        assert list(m_doc.sents)
    assert len(str(m_doc)) > len(en_texts[0]) + len(en_texts[1])
    # space delimiter considered, although spacy attribute was missing
    assert str(m_doc) == " ".join(en_texts_without_empty)
    p_token = m_doc[len(en_docs[0]) - 1]
    assert p_token.text == "." and bool(p_token.whitespace_)
    en_docs_tokens = [t for doc in en_docs for t in doc]
    assert len(m_doc) == len(en_docs_tokens)
    think_idx = len(en_texts[0]) + 1 + en_texts[2].index("think")
    assert m_doc[9].idx == think_idx
def test_doc_lang(en_vocab):
    """Docs and tokens expose the vocab language via lang/lang_."""
    doc = Doc(en_vocab, words=["Hello", "world"])
    assert doc.lang_ == "en"
    assert doc.lang == en_vocab.strings["en"]
    assert doc[0].lang_ == "en"
    assert doc[0].lang == en_vocab.strings["en"]
    # Same holds for a doc produced by a full pipeline
    nlp = English()
    doc = nlp("Hello world")
    assert doc.lang_ == "en"
    assert doc.lang == en_vocab.strings["en"]
    assert doc[0].lang_ == "en"
    assert doc[0].lang == en_vocab.strings["en"]
def test_token_lexeme(en_vocab):
    """Test that tokens expose their lexeme."""
    token = Doc(en_vocab, words=["Hello", "world"])[0]
    assert isinstance(token.lex, Lexeme)
    assert token.lex.text == token.text
    assert en_vocab[token.orth] == token.lex