diff --git a/spacy/tokenizer.pxd b/spacy/tokenizer.pxd
index 44f6ee522..fa38a1015 100644
--- a/spacy/tokenizer.pxd
+++ b/spacy/tokenizer.pxd
@@ -23,8 +23,10 @@ cdef class Tokenizer:
     cdef object _infix_finditer
     cdef object _rules
     cdef PhraseMatcher _special_matcher
-    cdef int _property_init_count # TODO: unused, remove in v3.1
-    cdef int _property_init_max # TODO: unused, remove in v3.1
+    # TODO next two are unused and should be removed in v4
+    # https://github.com/explosion/spaCy/pull/9150
+    cdef int _unused_int1
+    cdef int _unused_int2

     cdef Doc _tokenize_affixes(self, str string, bint with_special_cases)
     cdef int _apply_special_cases(self, Doc doc) except -1
diff --git a/spacy/vocab.pxd b/spacy/vocab.pxd
index 9b556247b..b28ad3e85 100644
--- a/spacy/vocab.pxd
+++ b/spacy/vocab.pxd
@@ -32,7 +32,7 @@ cdef class Vocab:
     cdef public object writing_system
     cdef public object get_noun_chunks
     cdef readonly int length
-    cdef public object data_dir
+    cdef public object _unused_object  # TODO remove in v4, see #9150
     cdef public object lex_attr_getters
     cdef public object cfg

diff --git a/spacy/vocab.pyi b/spacy/vocab.pyi
index 0a8ef6198..7c0d0598e 100644
--- a/spacy/vocab.pyi
+++ b/spacy/vocab.pyi
@@ -71,7 +71,7 @@ def unpickle_vocab(
     sstore: StringStore,
     vectors: Any,
     morphology: Any,
-    data_dir: Any,
+    _unused_object: Any,
     lex_attr_getters: Any,
     lookups: Any,
     get_noun_chunks: Any,
diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx
index 552898a98..402528f28 100644
--- a/spacy/vocab.pyx
+++ b/spacy/vocab.pyx
@@ -552,21 +552,21 @@ def pickle_vocab(vocab):
     sstore = vocab.strings
     vectors = vocab.vectors
     morph = vocab.morphology
-    data_dir = vocab.data_dir
+    _unused_object = vocab._unused_object
     lex_attr_getters = srsly.pickle_dumps(vocab.lex_attr_getters)
     lookups = vocab.lookups
     get_noun_chunks = vocab.get_noun_chunks
     return (unpickle_vocab,
-        (sstore, vectors, morph, data_dir, lex_attr_getters, lookups, get_noun_chunks))
+        (sstore, vectors, morph, _unused_object, lex_attr_getters, lookups, get_noun_chunks))


-def unpickle_vocab(sstore, vectors, morphology, data_dir,
+def unpickle_vocab(sstore, vectors, morphology, _unused_object,
                    lex_attr_getters, lookups, get_noun_chunks):
     cdef Vocab vocab = Vocab()
     vocab.vectors = vectors
     vocab.strings = sstore
     vocab.morphology = morphology
-    vocab.data_dir = data_dir
+    vocab._unused_object = _unused_object
     vocab.lex_attr_getters = srsly.pickle_loads(lex_attr_getters)
     vocab.lookups = lookups
     vocab.get_noun_chunks = get_noun_chunks
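
Note (not part of the diff): a minimal sketch of the behaviour the pickle_vocab / unpickle_vocab change above is expected to preserve, assuming a bare Vocab() and the standard pickle module; the rename of data_dir to _unused_object keeps the reduce tuple's shape, so pickling should still round-trip.

    import pickle
    from spacy.vocab import Vocab

    vocab = Vocab()
    # Serialisation dispatches to pickle_vocab (registered in vocab.pyx);
    # deserialisation rebuilds the object via unpickle_vocab.
    restored = pickle.loads(pickle.dumps(vocab))
    assert isinstance(restored, Vocab)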