mirror of https://github.com/explosion/spaCy.git
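"""Test that trailing close punctuation (')', ']', '}', '*') is split off
the preceding word by the tokenizer."""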
from __future__ import unicode_literals

from spacy import lex_of
from spacy.en import lookup
from spacy.en import tokenize
from spacy.en import unhash

import pytest

@pytest.fixture
def close_puncts():
    return [')', ']', '}', '*']

def test_close(close_puncts):
    # A word followed by a single close-punctuation character splits in two.
    word_str = 'Hello'
    for p in close_puncts:
        string = word_str + p
        tokens = tokenize(string)
        assert len(tokens) == 2
        assert unhash(lex_of(tokens[1])) == p
        assert unhash(lex_of(tokens[0])) == word_str

def test_two_different_close(close_puncts):
    # Close punctuation followed by an apostrophe yields two separate tokens.
    word_str = 'Hello'
    for p in close_puncts:
        string = word_str + p + "'"
        tokens = tokenize(string)
        assert len(tokens) == 3
        assert unhash(lex_of(tokens[0])) == word_str
        assert unhash(lex_of(tokens[1])) == p
        assert unhash(lex_of(tokens[2])) == "'"

def test_three_same_close(close_puncts):
    # A run of three identical close-punctuation marks splits one per character.
    word_str = 'Hello'
    for p in close_puncts:
        string = word_str + p + p + p
        tokens = tokenize(string)
        assert len(tokens) == 4
        assert unhash(lex_of(tokens[0])) == word_str
        assert unhash(lex_of(tokens[1])) == p
        assert unhash(lex_of(tokens[2])) == p
        assert unhash(lex_of(tokens[3])) == p