From a6d9fb5bb65066887e5a7e5d44b078e722b2b002 Mon Sep 17 00:00:00 2001
From: Vimos Tan
Date: Wed, 30 Aug 2017 14:49:14 +0800
Subject: [PATCH] fix issue #1292

---
 .../tokenizer/test_customized_tokenizer.py | 46 +++++++++++++++++++
 spacy/tokenizer.pyx                        |  3 +-
 2 files changed, 48 insertions(+), 1 deletion(-)
 create mode 100644 spacy/tests/tokenizer/test_customized_tokenizer.py

diff --git a/spacy/tests/tokenizer/test_customized_tokenizer.py b/spacy/tests/tokenizer/test_customized_tokenizer.py
new file mode 100644
index 000000000..97a7db64c
--- /dev/null
+++ b/spacy/tests/tokenizer/test_customized_tokenizer.py
@@ -0,0 +1,46 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from ... import load
+from ...tokenizer import Tokenizer
+from ... import util
+
+import pytest
+
+
+def test_customized_tokenizer_handles_infixes():
+    def custom_tokenizer(nlp_model):
+        prefix_re = util.compile_prefix_regex(nlp_model.Defaults.prefixes)
+        suffix_re = util.compile_suffix_regex(nlp_model.Defaults.suffixes)
+        custom_infixes = ['\.\.\.+',
+                          '(?<=[0-9])-(?=[0-9])',
+                          # '(?<=[0-9]+),(?=[0-9]+)',
+                          '[0-9]+(,[0-9]+)+',
+                          u'[\[\]!&:,()\*—–\/-]']
+
+        infix_re = util.compile_infix_regex(custom_infixes)
+
+        # infix_re = re.compile(ur'[\[\]!&:,()]')
+
+        tokenizer = Tokenizer(nlp_model.vocab,
+                              nlp_model.Defaults.tokenizer_exceptions,
+                              prefix_re.search,
+                              suffix_re.search,
+                              infix_re.finditer,
+                              token_match=None)
+        return lambda text: tokenizer(text)
+
+    nlp = load('en', create_make_doc=custom_tokenizer)
+
+    sentence = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion."
+    context = [word.text for word in nlp(sentence)]
+    assert context == [u'The', u'8', u'and', u'10', u'-', u'county', u'definitions', u'are', u'not', u'used',
+                       u'for',
+                       u'the', u'greater', u'Southern', u'California', u'Megaregion', u'.']
+
+    # the trailing '-' may cause Assertion Error
+    sentence = "The 8- and 10-county definitions are not used for the greater Southern California Megaregion."
+    context = [word.text for word in nlp(sentence)]
+    assert context == [u'The', u'8', u'-', u'and', u'10', u'-', u'county', u'definitions', u'are', u'not', u'used',
+                       u'for',
+                       u'the', u'greater', u'Southern', u'California', u'Megaregion', u'.']
diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx
index 276f0ef20..799e4bdaa 100644
--- a/spacy/tokenizer.pyx
+++ b/spacy/tokenizer.pyx
@@ -312,7 +312,8 @@ cdef class Tokenizer:
                         start = infix_end
                     span = string[start:]
-                    tokens.push_back(self.vocab.get(tokens.mem, span), False)
+                    if span:
+                        tokens.push_back(self.vocab.get(tokens.mem, span), False)
         cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin()
         while it != suffixes.rend():
             lexeme = deref(it)
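
Why the one-line guard in tokenizer.pyx is needed: the infix loop advances
`start` to the end of each infix match. When the remaining string ends with
an infix (the trailing '-' in "8-"), `start` lands at `len(string)`, so
`string[start:]` is empty, and pushing that empty span into the Doc is what
raised the assertion error reported in issue #1292. Below is a minimal
pure-Python sketch of the loop patched in the hunk above; the helper name
`split_on_infixes` and the toy infix pattern are illustrative only, not
spaCy API.

    import re

    def split_on_infixes(string, infix_finditer):
        # Pure-Python mirror of the infix loop patched above.
        spans = []
        start = 0
        for match in infix_finditer(string):
            if match.start() > start:
                spans.append(string[start:match.start()])        # text before the infix
            if match.start() != match.end():
                spans.append(string[match.start():match.end()])  # the infix itself
            start = match.end()
        span = string[start:]
        if span:  # the patched guard: skip the empty span left by a trailing infix
            spans.append(span)
        return spans

    infix_re = re.compile(r'-')
    print(split_on_infixes("8-", infix_re.finditer))         # ['8', '-']
    print(split_on_infixes("10-county", infix_re.finditer))  # ['10', '-', 'county']

Without the `if span:` guard, the first call would also push an empty
string, which is the failure mode the second sentence in the new test
exercises.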