From 654fe447b11676ee004df8f5f74bc1f94b000304 Mon Sep 17 00:00:00 2001
From: ines
Date: Sun, 5 Feb 2017 11:47:07 +0100
Subject: [PATCH] Add Swedish tokenizer tests (see #807)

---
 spacy/tests/sv/__init__.py       |  0
 spacy/tests/sv/test_tokenizer.py | 24 ++++++++++++++++++++++++
 2 files changed, 24 insertions(+)
 create mode 100644 spacy/tests/sv/__init__.py
 create mode 100644 spacy/tests/sv/test_tokenizer.py

diff --git a/spacy/tests/sv/__init__.py b/spacy/tests/sv/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/spacy/tests/sv/test_tokenizer.py b/spacy/tests/sv/test_tokenizer.py
new file mode 100644
index 000000000..c3305ca7b
--- /dev/null
+++ b/spacy/tests/sv/test_tokenizer.py
@@ -0,0 +1,24 @@
+# encoding: utf8
+from __future__ import unicode_literals
+
+import pytest
+
+
+SV_TOKEN_EXCEPTION_TESTS = [
+    ('Smörsåsen används bl.a. till fisk', ['Smörsåsen', 'används', 'bl.a.', 'till', 'fisk']),
+    ('Jag kommer först kl. 13 p.g.a. diverse förseningar', ['Jag', 'kommer', 'först', 'kl.', '13', 'p.g.a.', 'diverse', 'förseningar'])
+]
+
+
+@pytest.mark.parametrize('text,expected_tokens', SV_TOKEN_EXCEPTION_TESTS)
+def test_tokenizer_handles_exception_cases(sv_tokenizer, text, expected_tokens):
+    tokens = sv_tokenizer(text)
+    token_list = [token.text for token in tokens if not token.is_space]
+    assert expected_tokens == token_list
+
+
+@pytest.mark.parametrize('text', ["driveru", "hajaru", "Serru", "Fixaru"])
+def test_tokenizer_handles_verb_exceptions(sv_tokenizer, text):
+    tokens = sv_tokenizer(text)
+    assert len(tokens) == 2
+    assert tokens[1].text == "u"
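
Note: the tests above depend on an sv_tokenizer pytest fixture that is not part of this patch; it is expected to come from the shared test conftest. A minimal sketch of such a fixture, assuming the per-language fixture pattern used elsewhere in spacy/tests and that 'sv' is a registered language code in this spaCy version:

    import pytest
    from spacy.util import get_lang_class

    @pytest.fixture
    def sv_tokenizer():
        # Hypothetical fixture, not from this patch: build a bare Swedish
        # tokenizer from the language defaults, as other language fixtures do.
        return get_lang_class('sv').Defaults.create_tokenizer()

With a fixture along these lines in spacy/tests/conftest.py, the new tests can be run with, for example, `py.test spacy/tests/sv/test_tokenizer.py`.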