2017-01-05 12:11:31 +00:00
|
|
|
# coding: utf-8
|
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
|
|
|
|
|
2017-05-29 20:14:31 +00:00
|
|
|
def test_en_tokenizer_handles_basic_contraction(en_tokenizer):
|
|
|
|
text = "don't giggle"
|
|
|
|
tokens = en_tokenizer(text)
|
|
|
|
assert len(tokens) == 3
|
|
|
|
assert tokens[1].text == "n't"
|
|
|
|
text = "i said don't!"
|
|
|
|
tokens = en_tokenizer(text)
|
|
|
|
assert len(tokens) == 5
|
|
|
|
assert tokens[4].text == "!"
|
|
|
|
|
|
|
|
|
# NOTE: the second case was previously written as the quadruple-quote literal
# `""""isn't"""` (a triple-quoted string whose value is `"isn't`); rewritten
# with an explicit escape for readability. The runtime value is unchanged.
@pytest.mark.parametrize("text", ["`ain't", "\"isn't", "can't!"])
def test_en_tokenizer_handles_basic_contraction_punct(en_tokenizer, text):
    """A contraction with attached leading/trailing punctuation tokenizes
    into exactly three tokens (punct + split contraction)."""
    tokens = en_tokenizer(text)
    assert len(tokens) == 3
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text_poss,text", [("Robin's", "Robin"), ("Alexis's", "Alexis")]
)
def test_en_tokenizer_handles_poss_contraction(en_tokenizer, text_poss, text):
    """The possessive 's is split off the base word as its own token."""
    doc = en_tokenizer(text_poss)
    assert len(doc) == 2
    assert doc[0].text == text
    assert doc[1].text == "'s"
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["schools'", "Alexis'"])
def test_en_tokenizer_splits_trailing_apos(en_tokenizer, text):
    """A bare trailing apostrophe is separated from the preceding word."""
    doc = en_tokenizer(text)
    assert len(doc) == 2
    base, _, _ = text.partition("'")
    assert doc[0].text == base
    assert doc[1].text == "'"
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["'em", "nothin'", "ol'"])
def test_en_tokenizer_doesnt_split_apos_exc(en_tokenizer, text):
    """Apostrophe exception words (colloquial forms) remain one token."""
    doc = en_tokenizer(text)
    assert len(doc) == 1
    assert doc[0].text == text
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["we'll", "You'll", "there'll"])
def test_en_tokenizer_handles_ll_contraction(en_tokenizer, text):
    """'ll splits off the host word and is lemmatized to "will"."""
    doc = en_tokenizer(text)
    assert len(doc) == 2
    head, _, _ = text.partition("'")
    assert doc[0].text == head
    assert doc[1].text == "'ll"
    assert doc[1].lemma_ == "will"
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text_lower,text_title", [("can't", "Can't"), ("ain't", "Ain't")]
)
def test_en_tokenizer_handles_capitalization(en_tokenizer, text_lower, text_title):
    """Contraction splitting works the same for lowercase and title case:
    only the casing of the first token differs."""
    lower_doc = en_tokenizer(text_lower)
    title_doc = en_tokenizer(text_title)
    assert title_doc[0].text == lower_doc[0].text.title()
    assert lower_doc[0].text == title_doc[0].text.lower()
    assert lower_doc[1].text == title_doc[1].text
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("pron", ["I", "You", "He", "She", "It", "We", "They"])
@pytest.mark.parametrize("contraction", ["'ll", "'d"])
def test_en_tokenizer_keeps_title_case(en_tokenizer, pron, contraction):
    """Splitting a pronoun contraction preserves the pronoun's casing."""
    doc = en_tokenizer(pron + contraction)
    assert doc[0].text == pron
    assert doc[1].text == contraction
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("exc", ["Ill", "ill", "Hell", "hell", "Well", "well"])
def test_en_tokenizer_excludes_ambiguous(en_tokenizer, exc):
    """Words that merely look like apostrophe-less contractions ("Ill",
    "Well") must not be split."""
    assert len(en_tokenizer(exc)) == 1
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "wo_punct,w_punct", [("We've", "`We've"), ("couldn't", "couldn't)")]
)
def test_en_tokenizer_splits_defined_punct(en_tokenizer, wo_punct, w_punct):
    """Attaching punctuation to a split contraction adds exactly one token."""
    assert len(en_tokenizer(wo_punct)) == 2
    assert len(en_tokenizer(w_punct)) == 3
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["e.g.", "p.m.", "Jan.", "Dec.", "Inc."])
def test_en_tokenizer_handles_abbr(en_tokenizer, text):
    """Known abbreviations keep their periods and stay a single token."""
    assert len(en_tokenizer(text)) == 1
|
|
|
|
|
|
|
|
|
2017-05-29 20:14:31 +00:00
|
|
|
def test_en_tokenizer_handles_exc_in_text(en_tokenizer):
|
2017-01-05 12:11:31 +00:00
|
|
|
text = "It's mediocre i.e. bad."
|
|
|
|
tokens = en_tokenizer(text)
|
|
|
|
assert len(tokens) == 6
|
|
|
|
assert tokens[3].text == "i.e."
|
2017-03-12 12:44:20 +00:00
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["1am", "12a.m.", "11p.m.", "4pm"])
def test_en_tokenizer_handles_times(en_tokenizer, text):
    """Clock strings split into number + meridiem, whose lemma is the
    normalized a.m./p.m. form."""
    doc = en_tokenizer(text)
    assert len(doc) == 2
    assert doc[1].lemma_ in ("a.m.", "p.m.")
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,norms", [("I'm", ["i", "am"]), ("shan't", ["shall", "not"])]
)
def test_en_tokenizer_norm_exceptions(en_tokenizer, text, norms):
    """Tokenizer exceptions carry the expected norm_ values per token."""
    doc = en_tokenizer(text)
    assert [tok.norm_ for tok in doc] == norms
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,norm", [("radicalised", "radicalized"), ("cuz", "because")]
)
def test_en_lex_attrs_norm_exceptions(en_tokenizer, text, norm):
    """Lexical-attribute norm exceptions map spelling variants and slang
    to their canonical forms."""
    assert en_tokenizer(text)[0].norm_ == norm
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["faster", "fastest", "better", "best"])
def test_en_lemmatizer_handles_irreg_adverbs(en_tokenizer, text):
    """Irregular comparatives/superlatives lemmatize to their base adverb
    ("fast" or "well")."""
    assert en_tokenizer(text)[0].lemma_ in ("fast", "well")
|