diff --git a/spacy/tests/sv/__init__.py b/spacy/tests/sv/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/spacy/tests/sv/test_tokenizer.py b/spacy/tests/sv/test_tokenizer.py
new file mode 100644
index 000000000..c3305ca7b
--- /dev/null
+++ b/spacy/tests/sv/test_tokenizer.py
@@ -0,0 +1,24 @@
+# encoding: utf8
+from __future__ import unicode_literals
+
+import pytest
+
+
+SV_TOKEN_EXCEPTION_TESTS = [
+    ('Smörsåsen används bl.a. till fisk', ['Smörsåsen', 'används', 'bl.a.', 'till', 'fisk']),
+    ('Jag kommer först kl. 13 p.g.a. diverse förseningar', ['Jag', 'kommer', 'först', 'kl.', '13', 'p.g.a.', 'diverse', 'förseningar'])
+]
+
+
+@pytest.mark.parametrize('text,expected_tokens', SV_TOKEN_EXCEPTION_TESTS)
+def test_tokenizer_handles_exception_cases(sv_tokenizer, text, expected_tokens):
+    tokens = sv_tokenizer(text)
+    token_list = [token.text for token in tokens if not token.is_space]
+    assert expected_tokens == token_list
+
+
+@pytest.mark.parametrize('text', ["driveru", "hajaru", "Serru", "Fixaru"])
+def test_tokenizer_handles_verb_exceptions(sv_tokenizer, text):
+    tokens = sv_tokenizer(text)
+    assert len(tokens) == 2
+    assert tokens[1].text == "u"