"""Test tokenization of English contractions and possessives."""
from __future__ import unicode_literals

from spacy.en import EN


def test_possess():
    tokens = EN.tokenize("Mike's")
    assert EN.lexicon.strings[tokens[0].sic] == "Mike"
    assert EN.lexicon.strings[tokens[1].sic] == "'s"
    assert len(tokens) == 2


def test_apostrophe():
    tokens = EN.tokenize("schools'")
    assert len(tokens) == 2
    assert tokens[1].string == "'"
    assert tokens[0].string == "schools"


def test_LL():
    tokens = EN.tokenize("we'll")
    assert len(tokens) == 2
    assert tokens[1].string == "will"
    assert tokens[0].string == "we"


def test_aint():
    tokens = EN.tokenize("ain't")
    assert len(tokens) == 2
    assert tokens[0].string == "are"
    assert tokens[1].string == "not"


def test_capitalized():
    tokens = EN.tokenize("can't")
    assert len(tokens) == 2
    tokens = EN.tokenize("Can't")
    assert len(tokens) == 2
    tokens = EN.tokenize("Ain't")
    assert len(tokens) == 2
    # capitalized forms split the same way, and the expansion keeps the casing
    assert tokens[0].string == "Are"


def test_punct():
    tokens = EN.tokenize("We've")
    assert len(tokens) == 2
    tokens = EN.tokenize("``We've")
    # the leading punctuation is split off as its own token
    assert len(tokens) == 3