mirror of https://github.com/explosion/spaCy.git
# coding: utf-8
from __future__ import unicode_literals

import pytest

from spacy.language import Language
from spacy.lang.punctuation import TOKENIZER_INFIXES
from spacy.lang.char_classes import ALPHA


@pytest.mark.parametrize('text,expected_tokens', [
    ("l'avion", ["l'", "avion"]), ("j'ai", ["j'", "ai"])])
def test_issue768(text, expected_tokens):
    """Allow zero-width 'infix' token during the tokenization process."""
    SPLIT_INFIX = r'(?<=[{a}]\')(?=[{a}])'.format(a=ALPHA)

    class FrenchTest(Language):
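        # Extend the shared default infix patterns with the zero-width
        # splitter defined above; everything else is inherited unchanged.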
        class Defaults(Language.Defaults):
            infixes = TOKENIZER_INFIXES + [SPLIT_INFIX]

    fr_tokenizer_w_infix = FrenchTest.Defaults.create_tokenizer()
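    # Tokenizing with the extended infixes should split each input into
    # exactly two tokens at the zero-width match.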
    tokens = fr_tokenizer_w_infix(text)
    assert len(tokens) == 2
    assert [t.text for t in tokens] == expected_tokens
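

# Illustrative sketch (not part of the upstream test): the zero-width split
# can be reproduced with the standard `re` module alone. The ASCII character
# class below is a simplified stand-in for spaCy's ALPHA, and re.split() on
# a zero-width pattern requires Python 3.7+.
def test_zero_width_split_regex_sketch():
    import re

    pattern = r"(?<=[a-zA-Z]')(?=[a-zA-Z])"
    match = re.search(pattern, "l'avion")
    assert match is not None
    assert match.span() == (2, 2)  # zero-width: start == end
    assert re.split(pattern, "l'avion") == ["l'", "avion"]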