import pytest
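
# These tests assume an `lb_tokenizer` pytest fixture supplied by the test
# suite's shared conftest.py, as in spaCy's language-test layout. A minimal
# sketch of such a fixture (an assumption, not necessarily the project's
# exact definition):
#
#     from spacy.util import get_lang_class
#
#     @pytest.fixture(scope="session")
#     def lb_tokenizer():
#         return get_lang_class("lb")().tokenizer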


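# Abbreviations must be kept as a single token rather than split at the period.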
@pytest.mark.parametrize("text", ["z.B.", "Jan."])
def test_lb_tokenizer_handles_abbr(lb_tokenizer, text):
    tokens = lb_tokenizer(text)
    assert len(tokens) == 1


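# The Luxembourgish article contraction d' (straight or curly apostrophe)
# must be split off as its own token.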
@pytest.mark.parametrize("text", ["d'Saach", "d'Kanner", "d’Welt", "d’Suen"])
def test_lb_tokenizer_splits_contractions(lb_tokenizer, text):
    tokens = lb_tokenizer(text)
    assert len(tokens) == 2


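# Exceptions such as 't must also be honoured inside running text
# (the sentence translates roughly as "But it's not obvious, life.").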
def test_lb_tokenizer_handles_exc_in_text(lb_tokenizer):
    text = "Mee 't ass net evident, d'Liewen."
    tokens = lb_tokenizer(text)
    assert len(tokens) == 9
    assert tokens[1].text == "'t"