from __future__ import unicode_literals
import pytest


@pytest.mark.models
def test_root(EN):
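    """The parser should assign a dependency label to every token."""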
    tokens = EN(u"i don't have other assistance")
    for t in tokens:
        assert t.dep != 0, t.orth_


@pytest.mark.models
def test_one_word_sentence(EN):
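    """A single-token doc should still be parsed, leaving the lone
    token with a non-zero dependency label (its own root)."""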
    # one word sentence
    doc = EN.tokenizer.tokens_from_list(['Hello'])
    EN.tagger(doc)
    assert len(doc) == 1
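    # Step through the parse without taking any explicit transitions;
    # the parse should still be completed once the context manager exits.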
    with EN.parser.step_through(doc) as _:
        pass
    assert doc[0].dep != 0