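"""Tests for the Ukrainian (uk) tokenizer: punctuation splitting and final
diacritics.

The ``uk_tokenizer`` fixture is provided by the test suite's conftest. As a
rough sketch only (the real conftest may differ, e.g. by skipping when an
optional morphology backend is missing), such a fixture can be built with
spaCy's ``get_lang_class`` helper::

    import pytest
    from spacy.util import get_lang_class

    @pytest.fixture(scope="session")
    def uk_tokenizer():
        return get_lang_class("uk")().tokenizer
"""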
import pytest

PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
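# "*" appears in both the open and close lists and pairs with itself in
# PUNCT_PAIRED, presumably because it is symmetric markup (e.g. *emphasis*)
# rather than a directional bracket.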


@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_uk_tokenizer_handles_only_punct(uk_tokenizer, text):
    tokens = uk_tokenizer(text)
    assert len(tokens) == len(text)


@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize(
    "text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_open_punct(uk_tokenizer, punct, text):
    tokens = uk_tokenizer(punct + text)
    assert len(tokens) == 2
    assert tokens[0].text == punct
    assert tokens[1].text == text


@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize(
    "text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_close_punct(uk_tokenizer, punct, text):
    tokens = uk_tokenizer(text + punct)
    assert len(tokens) == 2
    assert tokens[0].text == text
    assert tokens[1].text == punct


@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("punct_add", ["`"])
@pytest.mark.parametrize(
    "text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_two_diff_open_punct(uk_tokenizer, punct, punct_add, text):
    tokens = uk_tokenizer(punct + punct_add + text)
    assert len(tokens) == 3
    assert tokens[0].text == punct
    assert tokens[1].text == punct_add
    assert tokens[2].text == text


@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("punct_add", ["'"])
@pytest.mark.parametrize(
    "text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_two_diff_close_punct(uk_tokenizer, punct, punct_add, text):
    tokens = uk_tokenizer(text + punct + punct_add)
    assert len(tokens) == 3
    assert tokens[0].text == text
    assert tokens[1].text == punct
    assert tokens[2].text == punct_add


@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize(
    "text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_same_open_punct(uk_tokenizer, punct, text):
    tokens = uk_tokenizer(punct + punct + punct + text)
    assert len(tokens) == 4
    assert tokens[0].text == punct
    assert tokens[3].text == text


@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize(
    "text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_same_close_punct(uk_tokenizer, punct, text):
    tokens = uk_tokenizer(text + punct + punct + punct)
    assert len(tokens) == 4
    assert tokens[0].text == text
    assert tokens[1].text == punct


@pytest.mark.parametrize("text", ["'Тест"])
def test_uk_tokenizer_splits_open_apostrophe(uk_tokenizer, text):
    tokens = uk_tokenizer(text)
    assert len(tokens) == 2
    assert tokens[0].text == "'"
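# Note the contrast with word-internal apostrophes: in words like "З'єднання"
# (see the tests above) the apostrophe stays inside the token, while a
# leading apostrophe is split off as its own token.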


@pytest.mark.parametrize("text", ["Тест''"])
def test_uk_tokenizer_splits_double_end_quote(uk_tokenizer, text):
    tokens = uk_tokenizer(text)
    assert len(tokens) == 2
    tokens_punct = uk_tokenizer("''")
    assert len(tokens_punct) == 1
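# "''" is commonly used as a typewriter-style double quote; the assertions
# above check that it is split off the word yet kept as a single token.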


@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize(
    "text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_open_close_punct(
    uk_tokenizer, punct_open, punct_close, text
):
    tokens = uk_tokenizer(punct_open + text + punct_close)
    assert len(tokens) == 3
    assert tokens[0].text == punct_open
    assert tokens[1].text == text
    assert tokens[2].text == punct_close


@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize("punct_open2,punct_close2", [("`", "'")])
@pytest.mark.parametrize(
    "text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_two_diff_punct(
    uk_tokenizer, punct_open, punct_close, punct_open2, punct_close2, text
):
    tokens = uk_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2)
    assert len(tokens) == 5
    assert tokens[0].text == punct_open2
    assert tokens[1].text == punct_open
    assert tokens[2].text == text
    assert tokens[3].text == punct_close
    assert tokens[4].text == punct_close2


@pytest.mark.parametrize(
    "text", ["Привет.", "Привіт.", "Ґелґотати.", "З'єднання.", "Єдність.", "їхні."]
)
def test_uk_tokenizer_splits_trailing_dot(uk_tokenizer, text):
    tokens = uk_tokenizer(text)
    assert tokens[1].text == "."


def test_uk_tokenizer_splits_bracket_period(uk_tokenizer):
    text = "(Раз, два, три, проверка)."
    tokens = uk_tokenizer(text)
    assert tokens[-1].text == "."


def test_uk_tokenizer_handles_final_diacritics(uk_tokenizer):
    text = "Хлібі́в не було́. Хлібі́в не було́."
    tokens = uk_tokenizer(text)
    assert tokens[2].text == "було́"
    assert tokens[3].text == "."
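# The stress marks in "було́" above are U+0301 combining acute accents; the
# assertions check that the tokenizer keeps the combining character attached
# to the word while still splitting off the sentence-final period.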