from __future__ import unicode_literals

import pytest
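
# NOTE: ``en_tokenizer`` is not defined in this module; it is assumed to be
# a pytest fixture provided elsewhere in the suite (typically conftest.py)
# that returns an English tokenizer instance.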


@pytest.fixture
def close_puncts():
    return [')', ']', '}', '*']
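

# A single closing punctuation mark should be split off the preceding word,
# yielding exactly two tokens.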
def test_close(close_puncts, en_tokenizer):
    word_str = 'Hello'
    for p in close_puncts:
        string = word_str + p
        tokens = en_tokenizer(string)
        assert len(tokens) == 2
        assert tokens[1].string == p
        assert tokens[0].string == word_str
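

# Two different trailing marks (a closer followed by an apostrophe) should
# each become their own token.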
def test_two_different_close(close_puncts, en_tokenizer):
    word_str = 'Hello'
    for p in close_puncts:
        string = word_str + p + "'"
        tokens = en_tokenizer(string)
        assert len(tokens) == 3
        assert tokens[0].string == word_str
        assert tokens[1].string == p
        assert tokens[2].string == "'"
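

# A run of three identical closing marks should be split into one token per
# mark, giving four tokens in total.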
def test_three_same_close(close_puncts, en_tokenizer):
    word_str = 'Hello'
    for p in close_puncts:
        string = word_str + p + p + p
        tokens = en_tokenizer(string)
        assert len(tokens) == 4
        assert tokens[0].string == word_str
        assert tokens[1].string == p
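

# A double apostrophe (closing-quote style) should be kept together as a
# single token, whether or not it follows a word.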
def test_double_end_quote(en_tokenizer):
    assert len(en_tokenizer("Hello''")) == 2
    assert len(en_tokenizer("''")) == 1