# coding: utf-8
from __future__ import unicode_literals

import pytest
import numpy
from spacy.tokens import Doc
from spacy.vocab import Vocab
from spacy.attrs import LEMMA

from ..util import get_doc
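
# These tests cover the Doc / Span API: indexing and slicing, serialization
# round-trips, manually setting entities, merging and retokenization, vectors,
# similarity, and a couple of parser-related regression cases.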


@pytest.mark.parametrize("text", [["one", "two", "three"]])
def test_doc_api_compare_by_string_position(en_vocab, text):
    doc = Doc(en_vocab, words=text)
    # Get the tokens in this order, so their ID ordering doesn't match the idx
    token3 = doc[-1]
    token2 = doc[-2]
    token1 = doc[-1]
    token1, token2, token3 = doc
    assert token1 < token2 < token3
    assert not token1 > token2
    assert token2 > token1
    assert token2 <= token3
    assert token3 >= token1


def test_doc_api_getitem(en_tokenizer):
    text = "Give it back! He pleaded."
    tokens = en_tokenizer(text)
    assert tokens[0].text == "Give"
    assert tokens[-1].text == "."
    with pytest.raises(IndexError):
        tokens[len(tokens)]

    def to_str(span):
        return "/".join(token.text for token in span)

    # Doc slicing: out-of-range indices are clipped, negative indices are
    # supported, and step values other than 1 raise a ValueError.
    span = tokens[1:1]
    assert not to_str(span)
    span = tokens[1:4]
    assert to_str(span) == "it/back/!"
    span = tokens[1:4:1]
    assert to_str(span) == "it/back/!"
    with pytest.raises(ValueError):
        tokens[1:4:2]
    with pytest.raises(ValueError):
        tokens[1:4:-1]
    span = tokens[-3:6]
    assert to_str(span) == "He/pleaded"
    span = tokens[4:-1]
    assert to_str(span) == "He/pleaded"
    span = tokens[-5:-3]
    assert to_str(span) == "back/!"
    span = tokens[5:4]
    assert span.start == span.end == 5 and not to_str(span)
    span = tokens[4:-3]
    assert span.start == span.end == 4 and not to_str(span)
    span = tokens[:]
    assert to_str(span) == "Give/it/back/!/He/pleaded/."
    span = tokens[4:]
    assert to_str(span) == "He/pleaded/."
    span = tokens[:4]
    assert to_str(span) == "Give/it/back/!"
    span = tokens[:-3]
    assert to_str(span) == "Give/it/back/!"
    span = tokens[-3:]
    assert to_str(span) == "He/pleaded/."
    span = tokens[4:50]
    assert to_str(span) == "He/pleaded/."
    span = tokens[-50:4]
    assert to_str(span) == "Give/it/back/!"
    span = tokens[-50:-40]
    assert span.start == span.end == 0 and not to_str(span)
    span = tokens[40:50]
    assert span.start == span.end == 7 and not to_str(span)

    # Slicing a Span yields a sub-Span with the same clipping behaviour.
    span = tokens[1:4]
    assert span[0].orth_ == "it"
    subspan = span[:]
    assert to_str(subspan) == "it/back/!"
    subspan = span[:2]
    assert to_str(subspan) == "it/back"
    subspan = span[1:]
    assert to_str(subspan) == "back/!"
    subspan = span[:-1]
    assert to_str(subspan) == "it/back"
    subspan = span[-2:]
    assert to_str(subspan) == "back/!"
    subspan = span[1:2]
    assert to_str(subspan) == "back"
    subspan = span[-2:-1]
    assert to_str(subspan) == "back"
    subspan = span[-50:50]
    assert to_str(subspan) == "it/back/!"
    subspan = span[50:-50]
    assert subspan.start == subspan.end == 4 and not to_str(subspan)


@pytest.mark.parametrize(
    # the second text has surrounding whitespace, which the round-trip must preserve
    "text", ["Give it back! He pleaded.", " Give it back! He pleaded. "]
)
def test_doc_api_serialize(en_tokenizer, text):
    tokens = en_tokenizer(text)
    new_tokens = Doc(tokens.vocab).from_bytes(tokens.to_bytes())
    assert tokens.text == new_tokens.text
    assert [t.text for t in tokens] == [t.text for t in new_tokens]
    assert [t.orth for t in tokens] == [t.orth for t in new_tokens]

    new_tokens = Doc(tokens.vocab).from_bytes(
        tokens.to_bytes(tensor=False), tensor=False
    )
    assert tokens.text == new_tokens.text
    assert [t.text for t in tokens] == [t.text for t in new_tokens]
    assert [t.orth for t in tokens] == [t.orth for t in new_tokens]

    new_tokens = Doc(tokens.vocab).from_bytes(
        tokens.to_bytes(sentiment=False), sentiment=False
    )
    assert tokens.text == new_tokens.text
    assert [t.text for t in tokens] == [t.text for t in new_tokens]
    assert [t.orth for t in tokens] == [t.orth for t in new_tokens]


def test_doc_api_set_ents(en_tokenizer):
    text = "I use goggle chrone to surf the web"
    tokens = en_tokenizer(text)
    assert len(tokens.ents) == 0
    tokens.ents = [(tokens.vocab.strings["PRODUCT"], 2, 4)]
    assert len(list(tokens.ents)) == 1
    assert [t.ent_iob for t in tokens] == [0, 0, 3, 1, 0, 0, 0, 0]
    assert tokens.ents[0].label_ == "PRODUCT"
    assert tokens.ents[0].start == 2
    assert tokens.ents[0].end == 4


def test_doc_api_merge(en_tokenizer):
    text = "WKRO played songs by the beach boys all night"
    # merge 'The Beach Boys'
    doc = en_tokenizer(text)
    assert len(doc) == 9
    doc.merge(
        doc[4].idx,
        doc[6].idx + len(doc[6]),
        tag="NAMED",
        lemma="LEMMA",
        ent_type="TYPE",
    )
    assert len(doc) == 7
    assert doc[4].text == "the beach boys"
    assert doc[4].text_with_ws == "the beach boys "
    assert doc[4].tag_ == "NAMED"

    # merge 'all night'
    doc = en_tokenizer(text)
    assert len(doc) == 9
    doc.merge(
        doc[7].idx,
        doc[8].idx + len(doc[8]),
        tag="NAMED",
        lemma="LEMMA",
        ent_type="TYPE",
    )
    assert len(doc) == 8
    assert doc[7].text == "all night"
    assert doc[7].text_with_ws == "all night"


def test_doc_api_merge_children(en_tokenizer):
    """Test that attachments work correctly after merging."""
    text = "WKRO played songs by the beach boys all night"
    doc = en_tokenizer(text)
    assert len(doc) == 9
    doc.merge(
        doc[4].idx,
        doc[6].idx + len(doc[6]),
        tag="NAMED",
        lemma="LEMMA",
        ent_type="TYPE",
    )
    for word in doc:
        if word.i < word.head.i:
            assert word in list(word.head.lefts)
        elif word.i > word.head.i:
            assert word in list(word.head.rights)


def test_doc_api_merge_hang(en_tokenizer):
    # Regression test: these two nested merges should complete without hanging.
    text = "through North and South Carolina"
    doc = en_tokenizer(text)
    doc.merge(18, 32, tag="", lemma="", ent_type="ORG")
    doc.merge(8, 32, tag="", lemma="", ent_type="ORG")


def test_doc_api_retokenizer(en_tokenizer):
    doc = en_tokenizer("WKRO played songs by the beach boys all night")
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[4:7])
    assert len(doc) == 7
    assert doc[4].text == "the beach boys"


def test_doc_api_retokenizer_attrs(en_tokenizer):
    doc = en_tokenizer("WKRO played songs by the beach boys all night")
    # test both string and integer attributes and values
    attrs = {LEMMA: "boys", "ENT_TYPE": doc.vocab.strings["ORG"]}
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[4:7], attrs=attrs)
    assert len(doc) == 7
    assert doc[4].text == "the beach boys"
    assert doc[4].lemma_ == "boys"
    assert doc[4].ent_type_ == "ORG"


@pytest.mark.xfail
def test_doc_api_retokenizer_lex_attrs(en_tokenizer):
    """Test that lexical attributes can be changed (see #2390)."""
    doc = en_tokenizer("WKRO played beach boys songs")
    assert not any(token.is_stop for token in doc)
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[2:4], attrs={"LEMMA": "boys", "IS_STOP": True})
    assert doc[2].text == "beach boys"
    assert doc[2].lemma_ == "boys"
    assert doc[2].is_stop
    new_doc = Doc(doc.vocab, words=["beach boys"])
    assert new_doc[0].is_stop


def test_doc_api_sents_empty_string(en_tokenizer):
    doc = en_tokenizer("")
    doc.is_parsed = True
    sents = list(doc.sents)
    assert len(sents) == 0


def test_doc_api_runtime_error(en_tokenizer):
    # Example that caused run-time error while parsing Reddit
    # fmt: off
    text = "67% of black households are single parent \n\n72% of all black babies born out of wedlock \n\n50% of all black kids don\u2019t finish high school"
    deps = ["nsubj", "prep", "amod", "pobj", "ROOT", "amod", "attr", "",
            "nummod", "prep", "det", "amod", "pobj", "acl", "prep", "prep",
            "pobj", "", "nummod", "prep", "det", "amod", "pobj", "aux", "neg",
            "ROOT", "amod", "dobj"]
    # fmt: on
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], deps=deps)

    nps = []
    for np in doc.noun_chunks:
        while len(np) > 1 and np[0].dep_ not in ("advmod", "amod", "compound"):
            np = np[1:]
        if len(np) > 1:
            nps.append(
                (np.start_char, np.end_char, np.root.tag_, np.text, np.root.ent_type_)
            )
    for np in nps:
        start, end, tag, lemma, ent_type = np
        doc.merge(start, end, tag=tag, lemma=lemma, ent_type=ent_type)


def test_doc_api_right_edge(en_tokenizer):
    """Test for bug occurring from Unshift action, causing incorrect right edge"""
    # fmt: off
    text = "I have proposed to myself, for the sake of such as live under the government of the Romans, to translate those books into the Greek tongue."
    heads = [2, 1, 0, -1, -1, -3, 15, 1, -2, -1, 1, -3, -1, -1, 1, -2, -1, 1,
             -2, -7, 1, -19, 1, -2, -3, 2, 1, -3, -26]
    # fmt: on
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads)
    assert doc[6].text == "for"
    subtree = [w.text for w in doc[6].subtree]
    assert subtree == [
        "for",
        "the",
        "sake",
        "of",
        "such",
        "as",
        "live",
        "under",
        "the",
        "government",
        "of",
        "the",
        "Romans",
        ",",
    ]
    assert doc[6].right_edge.text == ","


def test_doc_api_has_vector():
    vocab = Vocab()
    vocab.reset_vectors(width=2)
    vocab.set_vector("kitten", vector=numpy.asarray([0.0, 2.0], dtype="f"))
    doc = Doc(vocab, words=["kitten"])
    assert doc.has_vector


def test_doc_api_similarity_match():
    doc = Doc(Vocab(), words=["a"])
    with pytest.warns(None):
        assert doc.similarity(doc[0]) == 1.0
        assert doc.similarity(doc.vocab["a"]) == 1.0
    doc2 = Doc(doc.vocab, words=["a", "b", "c"])
    with pytest.warns(None):
        assert doc.similarity(doc2[:1]) == 1.0
        assert doc.similarity(doc2) == 0.0


def test_lowest_common_ancestor(en_tokenizer):
    tokens = en_tokenizer("the lazy dog slept")
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=[2, 1, 1, 0])
    lca = doc.get_lca_matrix()
    # lca[i, j] is the index of the lowest common ancestor of tokens i and j
    assert lca[1, 1] == 1
    assert lca[0, 1] == 2
    assert lca[1, 2] == 2


def test_parse_tree(en_tokenizer):
    """Tests doc.print_tree() method."""
    text = "I like New York in Autumn."
    heads = [1, 0, 1, -2, -3, -1, -5]
    tags = ["PRP", "IN", "NNP", "NNP", "IN", "NNP", "."]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, tags=tags)
    # full method parse_tree(text) is a trivial composition
    trees = doc.print_tree()
    assert len(trees) > 0
    tree = trees[0]
    assert all(
        k in list(tree.keys())
        for k in ["word", "lemma", "NE", "POS_fine", "POS_coarse", "arc", "modifiers"]
    )
    assert tree["word"] == "like"  # check root is correct