spaCy/spacy/tests/tokenizer/test_infix.py

from __future__ import unicode_literals
import pytest
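
# Every test in this module receives the `en_tokenizer` fixture, which the
# suite's conftest.py provides. A minimal sketch of an equivalent fixture,
# assuming the modern spacy.lang.en entry point (an assumption; the real
# conftest may construct it differently):
#
#     @pytest.fixture
#     def en_tokenizer():
#         from spacy.lang.en import English
#         return English().tokenizer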


def test_hyphen(en_tokenizer):
    # A hyphen between alphabetic words is an infix: 'best-known' splits
    # into three tokens.
    tokens = en_tokenizer('best-known')
    assert len(tokens) == 3


def test_numeric_range(en_tokenizer):
    # The hyphen in a numeric range is an infix, but the decimal points
    # are not: '0.1-13.5' splits into three tokens.
    tokens = en_tokenizer('0.1-13.5')
    assert len(tokens) == 3


def test_period(en_tokenizer):
    # A period followed by a capitalized word is an infix, while a
    # domain-like string such as 'zombo.com' stays a single token.
    tokens = en_tokenizer('best.Known')
    assert len(tokens) == 3
    tokens = en_tokenizer('zombo.com')
    assert len(tokens) == 1


def test_ellipsis(en_tokenizer):
    # An ellipsis is an infix regardless of the case of the following word.
    tokens = en_tokenizer('best...Known')
    assert len(tokens) == 3
    tokens = en_tokenizer('best...known')
    assert len(tokens) == 3


def test_email(en_tokenizer):
    # Email addresses are kept together as single tokens.
    tokens = en_tokenizer('hello@example.com')
    assert len(tokens) == 1
    tokens = en_tokenizer('hi+there@gmail.it')
    assert len(tokens) == 1


def test_double_hyphen(en_tokenizer):
    # A double hyphen acts as punctuation and becomes its own token; the
    # hyphenated 'well-bred' inside it should also split on its infix.
    tokens = en_tokenizer(u'No decent--let alone well-bred--people.')
    assert tokens[0].text == u'No'
    assert tokens[1].text == u'decent'
    assert tokens[2].text == u'--'
    assert tokens[3].text == u'let'
    assert tokens[4].text == u'alone'
    assert tokens[5].text == u'well'
    assert tokens[6].text == u'-'
    # TODO: This points to a deeper issue with the tokenizer: it doesn't
    # re-enter on infixes.
    assert tokens[7].text == u'bred'
    assert tokens[8].text == u'--'
    assert tokens[9].text == u'people'
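

# To inspect which infix pattern fires on a tricky case such as 'well-bred',
# the tokenizer's compiled infix matcher can be called directly. A sketch,
# assuming the fixture returns a spacy.tokenizer.Tokenizer, which exposes
# `infix_finditer`:
#
#     def show_infixes(tokenizer, text):
#         # Print each infix match together with its character span.
#         for match in tokenizer.infix_finditer(text):
#             print(text[match.start():match.end()], (match.start(), match.end()))
#
#     show_infixes(en_tokenizer, 'No decent--let alone well-bred--people.')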