# Source: mirror of https://github.com/explosion/spaCy.git
# (57 lines, 1.7 KiB, Python)
# coding: utf8
|
|
from __future__ import unicode_literals
|
|
|
|
import pytest
|
|
|
|
|
|
# Finnish sentences containing period-terminated abbreviations ("t.",
# "siht.", "n.") paired with the token texts the tokenizer must produce:
# the abbreviations keep their trailing period as part of the token.
ABBREVIATION_TESTS = [
    ("Hyvää uutta vuotta t. siht. Niemelä!",
     ["Hyvää", "uutta", "vuotta", "t.", "siht.", "Niemelä", "!"]),
    ("Paino on n. 2.2 kg",
     ["Paino", "on", "n.", "2.2", "kg"]),
]
|
|
|
|
# Hyphenated Finnish compounds ("taide-elokuva") and numeral-suffix forms
# ("1700-luvulle") that must each remain a single token.
HYPHENATED_TESTS = [
    ("1700-luvulle sijoittuva taide-elokuva",
     ["1700-luvulle", "sijoittuva", "taide-elokuva"]),
]
|
|
|
|
# Inflected abbreviations written with a colon before the case suffix
# ("VTT:ssa", "v:ta", "ALV:n") and ordinal forms ("14:s"): the colon plus
# suffix must stay attached to the token, while sentence-final punctuation
# is still split off.
ABBREVIATION_INFLECTION_TESTS = [
    ("VTT:ssa ennen v:ta 2010 suoritetut mittaukset",
     ["VTT:ssa", "ennen", "v:ta", "2010", "suoritetut", "mittaukset"]),
    ("ALV:n osuus on 24 %.",
     ["ALV:n", "osuus", "on", "24", "%", "."]),
    ("Hiihtäjä oli kilpailun 14:s.",
     ["Hiihtäjä", "oli", "kilpailun", "14:s", "."]),
]
|
|
|
|
|
|
@pytest.mark.parametrize("text,expected_tokens", ABBREVIATION_TESTS)
def test_fi_tokenizer_abbreviations(fi_tokenizer, text, expected_tokens):
    """Period-terminated abbreviations are kept as single tokens."""
    doc = fi_tokenizer(text)
    # Compare only non-whitespace tokens against the expected sequence.
    non_space = [tok.text for tok in doc if not tok.is_space]
    assert non_space == expected_tokens
|
|
|
|
|
|
@pytest.mark.parametrize("text,expected_tokens", HYPHENATED_TESTS)
def test_fi_tokenizer_hyphenated_words(fi_tokenizer, text, expected_tokens):
    """Hyphenated compounds are not split at the hyphen."""
    doc = fi_tokenizer(text)
    # Compare only non-whitespace tokens against the expected sequence.
    non_space = [tok.text for tok in doc if not tok.is_space]
    assert non_space == expected_tokens
|
|
|
|
|
|
@pytest.mark.parametrize("text,expected_tokens", ABBREVIATION_INFLECTION_TESTS)
def test_fi_tokenizer_abbreviation_inflections(fi_tokenizer, text, expected_tokens):
    """Colon-inflected abbreviations (e.g. "VTT:ssa") stay single tokens."""
    doc = fi_tokenizer(text)
    # Compare only non-whitespace tokens against the expected sequence.
    non_space = [tok.text for tok in doc if not tok.is_space]
    assert non_space == expected_tokens
|