"""Test entries in the tokenization special-case interacting with prefix
|
|
|
|
and suffix punctuation."""
from __future__ import unicode_literals

import pytest

from spacy.en import English


@pytest.fixture
def EN():
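    # The bare tokenizer is enough here; the full pipeline is not needed.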
    return English().tokenizer


def test_no_special(EN):
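    # No special-case entry fires; the parens split off: "(", "can", ")".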
    assert len(EN("(can)")) == 3


def test_no_punct(EN):
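    # The special case alone: "can't" tokenizes as "ca" + "n't".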
    assert len(EN("can't")) == 2


def test_prefix(EN):
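    # Prefix punctuation plus the special case: "(", "ca", "n't".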
    assert len(EN("(can't")) == 3


def test_suffix(EN):
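    # The special case plus suffix punctuation: "ca", "n't", ")".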
    assert len(EN("can't)")) == 3


def test_wrap(EN):
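    # Wrapped in matching punctuation: "(", "ca", "n't", ")".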
    assert len(EN("(can't)")) == 4


def test_uneven_wrap(EN):
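    # Uneven wrapping; the "?" splits off as an extra suffix token:
    # "(", "ca", "n't", "?", ")".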
    assert len(EN("(can't?)")) == 5


def test_prefix_interact(EN):
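    # "U.S." is a special case and stays one token; lowercase "us." is
    # not, so its period splits off. The special case still applies
    # after the "(" prefix is removed.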
    assert len(EN("U.S.")) == 1
    assert len(EN("us.")) == 2
    assert len(EN("(U.S.")) == 2


def test_suffix_interact(EN):
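    # The special case survives removal of the ")" suffix: "U.S.", ")".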
    assert len(EN("U.S.)")) == 2


def test_even_wrap_interact(EN):
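    # Evenly wrapped: "(", "U.S.", ")".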
    assert len(EN("(U.S.)")) == 3


def test_uneven_wrap_interact(EN):
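    # Unevenly wrapped: "(", "U.S.", "?", ")".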
    assert len(EN("(U.S.?)")) == 4