# cython: embedsignature=True
# cython: profile=True
# coding: utf8
from __future__ import unicode_literals

from cython.operator cimport dereference as deref
from cython.operator cimport preincrement as preinc
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
cimport cython

from collections import OrderedDict
import re

from .tokens.doc cimport Doc
from .strings cimport hash_string
from .compat import unescape_unicode
from .attrs import intify_attrs
from .symbols import ORTH

from .errors import Errors, Warnings, deprecation_warning
from . import util


cdef class Tokenizer:
    """Segment text, and create Doc objects with the discovered segment
    boundaries.

    DOCS: https://spacy.io/api/tokenizer
    """
    def __init__(self, Vocab vocab, rules=None, prefix_search=None,
                 suffix_search=None, infix_finditer=None, token_match=None):
        """Create a `Tokenizer`, to create `Doc` objects given unicode text.

        vocab (Vocab): A storage container for lexical types.
        rules (dict): Exceptions and special-cases for the tokenizer.
        prefix_search (callable): A function matching the signature of
            `re.compile(string).search` to match prefixes.
        suffix_search (callable): A function matching the signature of
            `re.compile(string).search` to match suffixes.
        infix_finditer (callable): A function matching the signature of
            `re.compile(string).finditer` to find infixes.
        token_match (callable): A boolean function matching strings to be
            recognised as tokens.
        RETURNS (Tokenizer): The newly constructed object.

        EXAMPLE:
            >>> tokenizer = Tokenizer(nlp.vocab)
            >>> tokenizer = English().Defaults.create_tokenizer(nlp)

        DOCS: https://spacy.io/api/tokenizer#init
        """
        self.mem = Pool()
        self._cache = PreshMap()
        self._specials = PreshMap()
        self.token_match = token_match
        self.prefix_search = prefix_search
        self.suffix_search = suffix_search
        self.infix_finditer = infix_finditer
        self.vocab = vocab
        self._rules = {}
        self._load_special_tokenization(rules)

    property token_match:
        def __get__(self):
            return self._token_match

        def __set__(self, token_match):
            self._token_match = token_match
            self._flush_cache()

    property prefix_search:
        def __get__(self):
            return self._prefix_search

        def __set__(self, prefix_search):
            self._prefix_search = prefix_search
            self._flush_cache()

    property suffix_search:
        def __get__(self):
            return self._suffix_search

        def __set__(self, suffix_search):
            self._suffix_search = suffix_search
            self._flush_cache()

    property infix_finditer:
        def __get__(self):
            return self._infix_finditer

        def __set__(self, infix_finditer):
            self._infix_finditer = infix_finditer
            self._flush_cache()

    property rules:
        def __get__(self):
            return self._rules

        def __set__(self, rules):
            self._rules = {}
            self._reset_cache([key for key in self._cache])
            self._reset_specials()
            self._cache = PreshMap()
            self._specials = PreshMap()
            self._load_special_tokenization(rules)

    def __reduce__(self):
        args = (self.vocab,
                self._rules,
                self.prefix_search,
                self.suffix_search,
                self.infix_finditer,
                self.token_match)
        return (self.__class__, args, None, None)

    cpdef Doc tokens_from_list(self, list strings):
        deprecation_warning(Warnings.W002)
        return Doc(self.vocab, words=strings)

    @cython.boundscheck(False)
    def __call__(self, unicode string):
        """Tokenize a string.

        string (unicode): The string to tokenize.
        RETURNS (Doc): A container for linguistic annotations.

        DOCS: https://spacy.io/api/tokenizer#call
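
        EXAMPLE: An illustrative sketch only; `tokenizer` is assumed to be an
            existing `Tokenizer` instance, e.g. `nlp.tokenizer`.
            >>> doc = tokenizer(u"This is a sentence.")
            >>> assert doc[0].text == u"This"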
        """
        if len(string) >= (2 ** 30):
            raise ValueError(Errors.E025.format(length=len(string)))
        cdef int length = len(string)
        cdef Doc doc = Doc(self.vocab)
        if length == 0:
            return doc
        cdef int i = 0
        cdef int start = 0
        cdef bint cache_hit
        cdef bint in_ws = string[0].isspace()
        cdef unicode span
        # The task here is much like string.split, but not quite
        # We find spans of whitespace and non-space characters, and ignore
        # spans that are exactly ' '. So, our sequences will all be separated
        # by either ' ' or nothing.
        for uc in string:
            if uc.isspace() != in_ws:
                if start < i:
                    # When we want to make this fast, get the data buffer once
                    # with PyUnicode_AS_DATA, and then maintain a start_byte
                    # and end_byte, so we can call hash64 directly. That way
                    # we don't have to create the slice when we hit the cache.
                    span = string[start:i]
                    key = hash_string(span)
                    cache_hit = self._try_cache(key, doc)
                    if not cache_hit:
                        self._tokenize(doc, span, key)
                if uc == ' ':
                    doc.c[doc.length - 1].spacy = True
                    start = i + 1
                else:
                    start = i
                in_ws = not in_ws
            i += 1
        if start < i:
            span = string[start:]
            key = hash_string(span)
            cache_hit = self._try_cache(key, doc)
            if not cache_hit:
                self._tokenize(doc, span, key)
            doc.c[doc.length - 1].spacy = string[-1] == " " and not in_ws
        return doc

    def pipe(self, texts, batch_size=1000, n_threads=-1):
        """Tokenize a stream of texts.

        texts (iterable): A sequence of unicode texts.
        batch_size (int): Number of texts to accumulate in an internal buffer.
            Defaults to 1000.
        YIELDS (Doc): A sequence of Doc objects, in order.

        DOCS: https://spacy.io/api/tokenizer#pipe
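
        EXAMPLE: An illustrative sketch only; `tokenizer` is assumed to be an
            existing `Tokenizer` instance.
            >>> texts = [u"One document.", u"...", u"Lots of documents"]
            >>> for doc in tokenizer.pipe(texts, batch_size=50):
            ...     pass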
        """
        if n_threads != -1:
            deprecation_warning(Warnings.W016)
        for text in texts:
            yield self(text)

    def _flush_cache(self):
        self._reset_cache([key for key in self._cache if key not in self._specials])

    def _reset_cache(self, keys):
        for k in keys:
            del self._cache[k]
            if k not in self._specials:
                cached = <_Cached*>self._cache.get(k)
                if cached is not NULL:
                    self.mem.free(cached)

    def _reset_specials(self):
        for k in self._specials:
            cached = <_Cached*>self._specials.get(k)
            del self._specials[k]
            if cached is not NULL:
                self.mem.free(cached)

    cdef int _try_cache(self, hash_t key, Doc tokens) except -1:
        cached = <_Cached*>self._cache.get(key)
        if cached == NULL:
            return False
        cdef int i
        if cached.is_lex:
            for i in range(cached.length):
                tokens.push_back(cached.data.lexemes[i], False)
        else:
            for i in range(cached.length):
                tokens.push_back(&cached.data.tokens[i], False)
        return True

    cdef int _tokenize(self, Doc tokens, unicode span, hash_t orig_key) except -1:
        cdef vector[LexemeC*] prefixes
        cdef vector[LexemeC*] suffixes
        cdef int orig_size
        cdef int has_special = 0
        orig_size = tokens.length
        span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes,
                                   &has_special)
        self._attach_tokens(tokens, span, &prefixes, &suffixes)
        self._save_cached(&tokens.c[orig_size], orig_key, has_special,
                          tokens.length - orig_size)

    cdef unicode _split_affixes(self, Pool mem, unicode string,
                                vector[const LexemeC*] *prefixes,
                                vector[const LexemeC*] *suffixes,
                                int* has_special):
        cdef size_t i
        cdef unicode prefix
        cdef unicode suffix
        cdef unicode minus_pre
        cdef unicode minus_suf
        cdef size_t last_size = 0
        while string and len(string) != last_size:
            if self._specials.get(hash_string(string)) != NULL:
                has_special[0] = 1
                break
            last_size = len(string)
            pre_len = self.find_prefix(string)
            if pre_len != 0:
                prefix = string[:pre_len]
                minus_pre = string[pre_len:]
                # Check whether we've hit a special-case
                if minus_pre and self._specials.get(hash_string(minus_pre)) != NULL:
                    string = minus_pre
                    prefixes.push_back(self.vocab.get(mem, prefix))
                    has_special[0] = 1
                    break
            suf_len = self.find_suffix(string)
            if suf_len != 0:
                suffix = string[-suf_len:]
                minus_suf = string[:-suf_len]
                # Check whether we've hit a special-case
                if minus_suf and (self._specials.get(hash_string(minus_suf)) != NULL):
                    string = minus_suf
                    suffixes.push_back(self.vocab.get(mem, suffix))
                    has_special[0] = 1
                    break
            if pre_len and suf_len and (pre_len + suf_len) <= len(string):
                string = string[pre_len:-suf_len]
                prefixes.push_back(self.vocab.get(mem, prefix))
                suffixes.push_back(self.vocab.get(mem, suffix))
            elif pre_len:
                string = minus_pre
                prefixes.push_back(self.vocab.get(mem, prefix))
            elif suf_len:
                string = minus_suf
                suffixes.push_back(self.vocab.get(mem, suffix))
            if string and (self._specials.get(hash_string(string)) != NULL):
                has_special[0] = 1
                break
        return string

    cdef int _attach_tokens(self, Doc tokens, unicode string,
                            vector[const LexemeC*] *prefixes,
                            vector[const LexemeC*] *suffixes) except -1:
        cdef bint cache_hit
        cdef int split, end
        cdef const LexemeC* const* lexemes
        cdef const LexemeC* lexeme
        cdef unicode span
        cdef int i
        if prefixes.size():
            for i in range(prefixes.size()):
                tokens.push_back(prefixes[0][i], False)
        if string:
            cache_hit = self._try_cache(hash_string(string), tokens)
            if cache_hit:
                pass
            elif self.token_match and self.token_match(string):
                # We're always saying 'no' to spaces here -- the caller will
                # fix up the outermost one, with reference to the original.
                # See Issue #859
                tokens.push_back(self.vocab.get(tokens.mem, string), False)
            else:
                matches = self.find_infix(string)
                if not matches:
                    tokens.push_back(self.vocab.get(tokens.mem, string), False)
                else:
                    # Let's say we have dyn-o-mite-dave - the regex finds the
                    # start and end positions of the hyphens
                    start = 0
                    start_before_infixes = start
                    for match in matches:
                        infix_start = match.start()
                        infix_end = match.end()

                        if infix_start == start_before_infixes:
                            continue

                        if infix_start != start:
                            span = string[start:infix_start]
                            tokens.push_back(self.vocab.get(tokens.mem, span), False)

                        if infix_start != infix_end:
                            # If infix_start != infix_end, it means the infix
                            # token is non-empty. Empty infix tokens are useful
                            # for tokenization in some languages (see
                            # https://github.com/explosion/spaCy/issues/768)
                            infix_span = string[infix_start:infix_end]
                            tokens.push_back(self.vocab.get(tokens.mem, infix_span), False)
                        start = infix_end
                    span = string[start:]
                    if span:
                        tokens.push_back(self.vocab.get(tokens.mem, span), False)
        cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin()
        while it != suffixes.rend():
            lexeme = deref(it)
            preinc(it)
            tokens.push_back(lexeme, False)

    cdef int _save_cached(self, const TokenC* tokens, hash_t key,
                          int has_special, int n) except -1:
        cdef int i
        if n <= 0:
            # avoid mem alloc of zero length
            return 0
        for i in range(n):
            if self.vocab._by_orth.get(tokens[i].lex.orth) == NULL:
                return 0
        # See #1250
        if has_special:
            return 0
        cached = <_Cached*>self.mem.alloc(1, sizeof(_Cached))
        cached.length = n
        cached.is_lex = True
        lexemes = <const LexemeC**>self.mem.alloc(n, sizeof(LexemeC**))
        for i in range(n):
            lexemes[i] = tokens[i].lex
        cached.data.lexemes = <const LexemeC* const*>lexemes
        self._cache.set(key, cached)

    def find_infix(self, unicode string):
        """Find internal split points of the string, such as hyphens.

        string (unicode): The string to segment.
        RETURNS (list): A list of `re.MatchObject` objects that have `.start()`
            and `.end()` methods, denoting the placement of internal segment
            separators, e.g. hyphens.

        DOCS: https://spacy.io/api/tokenizer#find_infix
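
        EXAMPLE: An illustrative sketch only; whether any matches are found
            depends on the `infix_finditer` rules the tokenizer was given.
            >>> matches = tokenizer.find_infix(u"well-known")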
        """
        if self.infix_finditer is None:
            return 0
        return list(self.infix_finditer(string))

    def find_prefix(self, unicode string):
        """Find the length of a prefix that should be segmented from the
        string, or 0 if no prefix rules match.

        string (unicode): The string to segment.
        RETURNS (int): The length of the prefix if present, otherwise 0.

        DOCS: https://spacy.io/api/tokenizer#find_prefix
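
        EXAMPLE: An illustrative sketch only; the result depends on the
            `prefix_search` rules the tokenizer was given.
            >>> length = tokenizer.find_prefix(u'("Hello')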
        """
        if self.prefix_search is None:
            return 0
        match = self.prefix_search(string)
        return (match.end() - match.start()) if match is not None else 0

    def find_suffix(self, unicode string):
        """Find the length of a suffix that should be segmented from the
        string, or 0 if no suffix rules match.

        string (unicode): The string to segment.
        RETURNS (int): The length of the suffix if present, otherwise 0.

        DOCS: https://spacy.io/api/tokenizer#find_suffix
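
        EXAMPLE: An illustrative sketch only; the result depends on the
            `suffix_search` rules the tokenizer was given.
            >>> length = tokenizer.find_suffix(u'Hello")')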
        """
        if self.suffix_search is None:
            return 0
        match = self.suffix_search(string)
        return (match.end() - match.start()) if match is not None else 0

    def _load_special_tokenization(self, special_cases):
        """Add special-case tokenization rules."""
        if special_cases is not None:
            for chunk, substrings in sorted(special_cases.items()):
                self.add_special_case(chunk, substrings)

    def add_special_case(self, unicode string, substrings):
        """Add a special-case tokenization rule.

        string (unicode): The string to specially tokenize.
        substrings (iterable): A sequence of dicts, where each dict describes
            a token and its attributes. The `ORTH` fields of the attributes
            must exactly match the string when they are concatenated.

        DOCS: https://spacy.io/api/tokenizer#add_special_case
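
        EXAMPLE: An illustrative sketch only; note that the `ORTH` values
            concatenate to exactly the string being overridden.
            >>> from spacy.attrs import ORTH
            >>> tokenizer.add_special_case(u"don't", [{ORTH: u"do"}, {ORTH: u"n't"}])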
        """
        substrings = list(substrings)
        cached = <_Cached*>self.mem.alloc(1, sizeof(_Cached))
        cached.length = len(substrings)
        cached.is_lex = False
        cached.data.tokens = self.vocab.make_fused_token(substrings)
        key = hash_string(string)
        stale_special = <_Cached*>self._specials.get(key)
        stale_cached = <_Cached*>self._cache.get(key)
        self._flush_cache()
        self._specials.set(key, cached)
        self._cache.set(key, cached)
        if stale_special is not NULL:
            self.mem.free(stale_special)
        if stale_special != stale_cached and stale_cached is not NULL:
            self.mem.free(stale_cached)
        self._rules[string] = substrings

    def explain(self, text):
        """A debugging tokenizer that provides information about which
        tokenizer rule or pattern was matched for each token. The tokens
        produced are identical to `nlp.tokenizer()` except for whitespace
        tokens.

        text (unicode): The string to tokenize.
        RETURNS (list): A list of (pattern_string, token_string) tuples.

        DOCS: https://spacy.io/api/tokenizer#explain
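
        EXAMPLE: An illustrative sketch only; assumes a fully configured
            tokenizer such as `nlp.tokenizer`.
            >>> tok_exp = nlp.tokenizer.explain(u"(don't)")
            >>> for pattern, token in tok_exp:
            ...     print(pattern, token)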
        """
        prefix_search = self.prefix_search
        suffix_search = self.suffix_search
        infix_finditer = self.infix_finditer
        token_match = self.token_match
        special_cases = {}
        for orth, special_tokens in self.rules.items():
            special_cases[orth] = [intify_attrs(special_token, strings_map=self.vocab.strings, _do_deprecated=True) for special_token in special_tokens]
        tokens = []
        for substring in text.split():
            suffixes = []
            while substring:
                while prefix_search(substring) or suffix_search(substring):
                    if substring in special_cases:
                        tokens.extend(("SPECIAL-" + str(i + 1), self.vocab.strings[e[ORTH]]) for i, e in enumerate(special_cases[substring]))
                        substring = ''
                        break
                    if prefix_search(substring):
                        split = prefix_search(substring).end()
                        # break if pattern matches the empty string
                        if split == 0:
                            break
                        tokens.append(("PREFIX", substring[:split]))
                        substring = substring[split:]
                        if substring in special_cases:
                            continue
                    if suffix_search(substring):
                        split = suffix_search(substring).start()
                        # break if pattern matches the empty string
                        if split == len(substring):
                            break
                        suffixes.append(("SUFFIX", substring[split:]))
                        substring = substring[:split]
                if substring in special_cases:
                    tokens.extend(("SPECIAL-" + str(i + 1), self.vocab.strings[e[ORTH]]) for i, e in enumerate(special_cases[substring]))
                    substring = ''
                elif token_match(substring):
                    tokens.append(("TOKEN_MATCH", substring))
                    substring = ''
                elif list(infix_finditer(substring)):
                    infixes = infix_finditer(substring)
                    offset = 0
                    for match in infixes:
                        if substring[offset : match.start()]:
                            tokens.append(("TOKEN", substring[offset : match.start()]))
                        if substring[match.start() : match.end()]:
                            tokens.append(("INFIX", substring[match.start() : match.end()]))
                        offset = match.end()
                    if substring[offset:]:
                        tokens.append(("TOKEN", substring[offset:]))
                    substring = ''
                elif substring:
                    tokens.append(("TOKEN", substring))
                    substring = ''
            tokens.extend(reversed(suffixes))
        return tokens

    def to_disk(self, path, **kwargs):
        """Save the current state to the given file path.

        path (unicode or Path): A path to a file, which will be created if
            it doesn't exist.
        exclude (list): String names of serialization fields to exclude.

        DOCS: https://spacy.io/api/tokenizer#to_disk
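
        EXAMPLE: An illustrative sketch only; the path is hypothetical.
            >>> tokenizer.to_disk("/path/to/tokenizer")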
        """
        # Allow plain strings as well as Path objects
        path = util.ensure_path(path)
        with path.open("wb") as file_:
            file_.write(self.to_bytes(**kwargs))

    def from_disk(self, path, **kwargs):
        """Load state from the given file path. Modifies the object in place
        and returns it.

        path (unicode or Path): A path to a file.
        exclude (list): String names of serialization fields to exclude.
        RETURNS (Tokenizer): The modified `Tokenizer` object.

        DOCS: https://spacy.io/api/tokenizer#from_disk
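
        EXAMPLE: An illustrative sketch only; the path is hypothetical.
            >>> tokenizer.from_disk("/path/to/tokenizer")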
        """
        # Allow plain strings as well as Path objects
        path = util.ensure_path(path)
        with path.open("rb") as file_:
            bytes_data = file_.read()
        self.from_bytes(bytes_data, **kwargs)
        return self

    def to_bytes(self, exclude=tuple(), **kwargs):
        """Serialize the current state to a binary string.

        exclude (list): String names of serialization fields to exclude.
        RETURNS (bytes): The serialized form of the `Tokenizer` object.

        DOCS: https://spacy.io/api/tokenizer#to_bytes
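
        EXAMPLE: An illustrative sketch only.
            >>> tokenizer_bytes = tokenizer.to_bytes()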
        """
        serializers = OrderedDict((
            ("vocab", lambda: self.vocab.to_bytes()),
            ("prefix_search", lambda: _get_regex_pattern(self.prefix_search)),
            ("suffix_search", lambda: _get_regex_pattern(self.suffix_search)),
            ("infix_finditer", lambda: _get_regex_pattern(self.infix_finditer)),
            ("token_match", lambda: _get_regex_pattern(self.token_match)),
            ("exceptions", lambda: OrderedDict(sorted(self._rules.items())))
        ))
        exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
        return util.to_bytes(serializers, exclude)

    def from_bytes(self, bytes_data, exclude=tuple(), **kwargs):
        """Load state from a binary string.

        bytes_data (bytes): The data to load from.
        exclude (list): String names of serialization fields to exclude.
        RETURNS (Tokenizer): The `Tokenizer` object.

        DOCS: https://spacy.io/api/tokenizer#from_bytes
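
        EXAMPLE: An illustrative sketch only; round-trips a tokenizer through
            its byte representation.
            >>> tokenizer_bytes = tokenizer.to_bytes()
            >>> tokenizer = Tokenizer(nlp.vocab)
            >>> tokenizer.from_bytes(tokenizer_bytes)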
        """
        data = OrderedDict()
        deserializers = OrderedDict((
            ("vocab", lambda b: self.vocab.from_bytes(b)),
            ("prefix_search", lambda b: data.setdefault("prefix_search", b)),
            ("suffix_search", lambda b: data.setdefault("suffix_search", b)),
            ("infix_finditer", lambda b: data.setdefault("infix_finditer", b)),
            ("token_match", lambda b: data.setdefault("token_match", b)),
            ("exceptions", lambda b: data.setdefault("rules", b))
        ))
        exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
        msg = util.from_bytes(bytes_data, deserializers, exclude)
        for key in ["prefix_search", "suffix_search", "infix_finditer"]:
            if key in data:
                data[key] = unescape_unicode(data[key])
        if data.get("prefix_search"):
            self.prefix_search = re.compile(data["prefix_search"]).search
        if data.get("suffix_search"):
            self.suffix_search = re.compile(data["suffix_search"]).search
        if data.get("infix_finditer"):
            self.infix_finditer = re.compile(data["infix_finditer"]).finditer
        if data.get("token_match"):
            self.token_match = re.compile(data["token_match"]).match
        if data.get("rules"):
            # make sure to hard reset the cache to remove data from the
            # default exceptions
            self._rules = {}
            self._reset_cache([key for key in self._cache])
            self._reset_specials()
            self._cache = PreshMap()
            self._specials = PreshMap()
            self._load_special_tokenization(data.get("rules", {}))
        return self


def _get_regex_pattern(regex):
    """Get a pattern string for a regex, or None if the pattern is None."""
    return None if regex is None else regex.__self__.pattern