import pytest
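
# NOTE: "fi_tokenizer" is normally provided by the test suite's
# conftest.py. The fixture below is only a sketch of what it might look
# like, assuming spaCy is installed; spacy.blank() is a real spaCy API,
# but the actual conftest fixture may be defined differently.
@pytest.fixture(scope="module")
def fi_tokenizer():
    # Illustrative only: a blank Finnish pipeline carries the language's
    # tokenizer rules (abbreviations, hyphens, colon suffixes) without
    # any trained components.
    import spacy

    return spacy.blank("fi").tokenizer
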
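
# Period-final abbreviations must remain single tokens: "t." = terveisin
# ("regards"), "siht." = sihteeri ("secretary"), "n." = noin ("approx.").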
ABBREVIATION_TESTS = [
    (
        "Hyvää uutta vuotta t. siht. Niemelä!",
        ["Hyvää", "uutta", "vuotta", "t.", "siht.", "Niemelä", "!"],
    ),
    ("Paino on n. 2.2 kg", ["Paino", "on", "n.", "2.2", "kg"]),
]
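
# Hyphenated compounds ("taide-elokuva", "art film") and number+suffix
# forms ("1700-luvulle", "into the 1700s") must not be split at the hyphen.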
HYPHENATED_TESTS = [
    (
        "1700-luvulle sijoittuva taide-elokuva",
        ["1700-luvulle", "sijoittuva", "taide-elokuva"],
    ),
]
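
# Colon-attached case endings stay on the abbreviation: "VTT:ssa" ("at
# VTT"), "v:ta" = vuotta (partitive of "year"), "14:s" (ordinal "14th").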
ABBREVIATION_INFLECTION_TESTS = [
    (
        "VTT:ssa ennen v:ta 2010 suoritetut mittaukset",
        ["VTT:ssa", "ennen", "v:ta", "2010", "suoritetut", "mittaukset"],
    ),
    (
        "ALV:n osuus on 24 %.",
        ["ALV:n", "osuus", "on", "24", "%", "."],
    ),
    (
        "Hiihtäjä oli kilpailun 14:s.",
        ["Hiihtäjä", "oli", "kilpailun", "14:s", "."],
    ),
]


@pytest.mark.parametrize("text,expected_tokens", ABBREVIATION_TESTS)
def test_fi_tokenizer_abbreviations(fi_tokenizer, text, expected_tokens):
    """Period-final abbreviations are kept as single tokens."""
    tokens = fi_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list


@pytest.mark.parametrize("text,expected_tokens", HYPHENATED_TESTS)
def test_fi_tokenizer_hyphenated_words(fi_tokenizer, text, expected_tokens):
    """Hyphenated compounds are kept as single tokens."""
    tokens = fi_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list


@pytest.mark.parametrize("text,expected_tokens", ABBREVIATION_INFLECTION_TESTS)
def test_fi_tokenizer_abbreviation_inflections(fi_tokenizer, text, expected_tokens):
    """Colon-attached case suffixes stay on the abbreviation."""
    tokens = fi_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list
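
# The tests above are pytest-parametrized; run them with, e.g.:
#   pytest -v <path to this file>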