# spaCy/tests/test_contractions.py


from __future__ import unicode_literals
import pytest
from spacy.en import English


@pytest.fixture
def EN():
    return English()


def test_possess(EN):
tokens = EN("Mike's")
2015-01-05 02:15:46 +00:00
assert EN.vocab.strings[tokens[0].sic] == b"Mike"
assert EN.vocab.strings[tokens[1].sic] == b"'s"
assert len(tokens) == 2
2014-12-21 09:41:13 +00:00
def test_apostrophe(EN):
tokens = EN("schools'")
assert len(tokens) == 2
assert tokens[1].string == "'"
assert tokens[0].string == "schools"
2014-12-21 09:41:13 +00:00
def test_LL(EN):
tokens = EN("we'll")
assert len(tokens) == 2
assert tokens[1].string == "'ll"
assert tokens[1].lemma == "will"
assert tokens[0].string == "we"
2014-12-21 09:41:13 +00:00
def test_aint(EN):
tokens = EN("ain't")
assert len(tokens) == 2
assert tokens[0].string == "ai"
assert tokens[0].lemma == "be"
assert tokens[1].string == "n't"
assert tokens[1].lemma == "not"
2014-12-21 09:41:13 +00:00
def test_capitalized(EN):
tokens = EN("can't")
assert len(tokens) == 2
2014-12-21 09:41:13 +00:00
tokens = EN("Can't")
assert len(tokens) == 2
2014-12-21 09:41:13 +00:00
tokens = EN("Ain't")
assert len(tokens) == 2
assert tokens[0].string == "Ai"
assert tokens[0].lemma == "be"
2014-12-07 11:08:04 +00:00
2014-12-21 09:41:13 +00:00
def test_punct(EN):
tokens = EN("We've")
2014-12-07 11:08:04 +00:00
assert len(tokens) == 2
2014-12-21 09:41:13 +00:00
tokens = EN("``We've")
2014-12-07 11:08:04 +00:00
assert len(tokens) == 3
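

# A minimal sketch of one possible further check, assuming the same old
# spacy.en API used above (calling EN(text) returns a token sequence whose
# length can be asserted). The test name and the contraction strings below
# are illustrative choices, not taken from the spaCy test suite.
@pytest.mark.parametrize("text,expected_length", [
    ("don't", 2),      # expected split: "do" + "n't"
    ("won't", 2),      # expected split: "wo" + "n't"
    ("they're", 2),    # expected split: "they" + "'re"
])
def test_more_contractions(EN, text, expected_length):
    tokens = EN(text)
    assert len(tokens) == expected_length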