# Last modified: 2019-08-22 09:43:07 +00:00
|
|
|
|
# coding: utf-8
|
|
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,norms,lemmas",
    [
        ("о.г.", ["ове године"], ["ова година"]),
        ("чет.", ["четвртак"], ["четвртак"]),
        ("гђа", ["госпођа"], ["госпођа"]),
        ("ил'", ["или"], ["или"]),
    ],
)
def test_sr_tokenizer_abbrev_exceptions(sr_tokenizer, text, norms, lemmas):
    """Each abbreviation listed in the tokenizer exceptions must stay a
    single token and carry the expected norm.

    NOTE(review): the ``lemmas`` column is parametrized but not asserted
    here — presumably lemma checks were dropped intentionally; confirm
    before removing the column.
    """
    doc = sr_tokenizer(text)
    # An exception match must not be split into multiple tokens.
    assert len(doc) == 1
    assert [t.norm_ for t in doc] == norms
|