From 4dddc8a69b3a8f7b7dd867565f037c7b92641a74 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Sat, 18 Jul 2015 22:39:57 +0200
Subject: [PATCH] * Fix type declarations for attr_t. Remove unused id_t.

---
 spacy/_ml.pxd                    |  2 +-
 spacy/en/pos.pyx                 |  4 ++--
 spacy/lexeme.pxd                 |  2 +-
 spacy/strings.pxd                |  7 +++++--
 spacy/strings.pyx                | 16 +++++++++-------
 spacy/structs.pxd                |  2 +-
 spacy/typedefs.pxd               |  1 -
 spacy/vocab.pxd                  |  2 +-
 tests/vocab/test_iter_lexicon.py | 12 ------------
 9 files changed, 20 insertions(+), 28 deletions(-)
 delete mode 100644 tests/vocab/test_iter_lexicon.py

diff --git a/spacy/_ml.pxd b/spacy/_ml.pxd
index 21637724e..c2c7ffded 100644
--- a/spacy/_ml.pxd
+++ b/spacy/_ml.pxd
@@ -9,7 +9,7 @@ from thinc.api cimport ExampleC
 
 from preshed.maps cimport PreshMapArray
 
-from .typedefs cimport hash_t, id_t
+from .typedefs cimport hash_t
 
 
 cdef int arg_max(const weight_t* scores, const int n_classes) nogil
diff --git a/spacy/en/pos.pyx b/spacy/en/pos.pyx
index 6632ffce6..db1679c28 100644
--- a/spacy/en/pos.pyx
+++ b/spacy/en/pos.pyx
@@ -12,13 +12,13 @@ from ..parts_of_speech cimport univ_pos_t
 from ..parts_of_speech cimport NO_TAG, ADJ, ADV, ADP, CONJ, DET, NOUN, NUM, PRON
 from ..parts_of_speech cimport PRT, VERB, X, PUNCT, EOL, SPACE
-from ..typedefs cimport id_t
 from ..structs cimport TokenC, Morphology, LexemeC
 from ..tokens.doc cimport Doc
 from ..morphology cimport set_morph_from_dict
 from .._ml cimport arg_max
 
 from .attrs cimport IS_ALPHA, IS_PUNCT, LIKE_NUM, LIKE_URL
+from ..typedefs cimport attr_t
 
 from .lemmatizer import Lemmatizer
 
 
@@ -342,7 +342,7 @@ cdef class EnPosTagger:
         cdef dict entries
         cdef dict props
         cdef int lemma
-        cdef id_t orth
+        cdef attr_t orth
         cdef int pos
         for pos_str, entries in exc.items():
             pos = self.tag_names.index(pos_str)
diff --git a/spacy/lexeme.pxd b/spacy/lexeme.pxd
index 2c4e55d68..a8deb3c52 100644
--- a/spacy/lexeme.pxd
+++ b/spacy/lexeme.pxd
@@ -1,4 +1,4 @@
-from .typedefs cimport attr_t, hash_t, flags_t, id_t, len_t, tag_t
+from .typedefs cimport attr_t, hash_t, flags_t, len_t, tag_t
 from .attrs cimport attr_id_t
 from .attrs cimport ID, ORTH, LOWER, NORM, SHAPE, PREFIX, SUFFIX, LENGTH, CLUSTER
 
diff --git a/spacy/strings.pxd b/spacy/strings.pxd
index c86d33d1c..66e07e207 100644
--- a/spacy/strings.pxd
+++ b/spacy/strings.pxd
@@ -1,6 +1,9 @@
 from cymem.cymem cimport Pool
 from preshed.maps cimport PreshMap
 from murmurhash.mrmr cimport hash64
+from .typedefs cimport attr_t
+
+from libc.stdint cimport int64_t
 
 from .structs cimport Utf8Str, UniStr
 from .typedefs cimport hash_t
@@ -17,9 +20,9 @@ cdef inline void slice_unicode(UniStr* s, Py_UNICODE* chars, int start, int end)
 
 cdef class StringStore:
     cdef Pool mem
     cdef Utf8Str* strings
-    cdef size_t size
+    cdef int64_t size
     cdef PreshMap _map
     cdef size_t _resize_at
 
-    cdef const Utf8Str* intern(self, char* chars, int length, int* id_) except NULL
+    cdef const Utf8Str* intern(self, char* chars, int length, attr_t* id_) except NULL
diff --git a/spacy/strings.pyx b/spacy/strings.pyx
index 8b1d30adf..e4887067f 100644
--- a/spacy/strings.pyx
+++ b/spacy/strings.pyx
@@ -3,8 +3,10 @@ import codecs
 
 from libc.string cimport memcpy
 from murmurhash.mrmr cimport hash64
+from libc.stdint cimport int64_t
 
-from .typedefs cimport hash_t
+
+from .typedefs cimport hash_t, attr_t
 
 SEPARATOR = '\n|-SEP-|\n'
 
@@ -34,7 +36,7 @@ cdef class StringStore:
     def __getitem__(self, object string_or_id):
         cdef bytes byte_string
        cdef const Utf8Str* utf8str
-        cdef int id_
+        cdef attr_t id_
         if isinstance(string_or_id, int) or isinstance(string_or_id, long):
             if string_or_id == 0:
                 return u''
@@ -52,26 +54,26 @@
         else:
             raise TypeError(type(string_or_id))
 
-    cdef const Utf8Str* intern(self, char* chars, int length, int* id_) except NULL:
+    cdef const Utf8Str* intern(self, char* chars, int length, attr_t* id_) except NULL:
         # 0 means missing, but we don't bother offsetting the index. We waste
         # slot 0 to simplify the code, because it doesn't matter.
         assert length != 0
         cdef hash_t key = hash64(chars, length * sizeof(char), 0)
         cdef void* value = self._map.get(key)
-        cdef size_t i
+        cdef int64_t i
         if value == NULL:
             if self.size == self._resize_at:
                 self._resize_at *= 2
                 self.strings = self.mem.realloc(
                     self.strings, self._resize_at * sizeof(Utf8Str))
-            i = self.size
+            i = self.size
             self.strings[i].chars = self.mem.alloc(length, sizeof(char))
             memcpy(self.strings[i].chars, chars, length)
             self.strings[i].length = length
             self._map.set(key, self.size)
             self.size += 1
         else:
-            i = <size_t>value
+            i = <int64_t>value
         id_[0] = i
         return &self.strings[i]
 
@@ -92,7 +94,7 @@
         strings = file_.read().split(SEPARATOR)
         cdef unicode string
         cdef bytes byte_string
-        cdef int id_
+        cdef attr_t id_
         for string in strings[1:]:
             byte_string = string.encode('utf8')
             self.intern(byte_string, len(byte_string), &id_)
diff --git a/spacy/structs.pxd b/spacy/structs.pxd
index d24a53a68..c3a71f7e0 100644
--- a/spacy/structs.pxd
+++ b/spacy/structs.pxd
@@ -1,6 +1,6 @@
 from libc.stdint cimport uint8_t, uint32_t, int32_t
 
-from .typedefs cimport flags_t, attr_t, id_t, hash_t
+from .typedefs cimport flags_t, attr_t, hash_t
 
 from .parts_of_speech cimport univ_pos_t
 
diff --git a/spacy/typedefs.pxd b/spacy/typedefs.pxd
index 0257f0e81..bd863d247 100644
--- a/spacy/typedefs.pxd
+++ b/spacy/typedefs.pxd
@@ -6,6 +6,5 @@ ctypedef uint64_t hash_t
 ctypedef char* utf8_t
 ctypedef int32_t attr_t
 ctypedef uint64_t flags_t
-ctypedef uint32_t id_t
 ctypedef uint16_t len_t
 ctypedef uint16_t tag_t
diff --git a/spacy/vocab.pxd b/spacy/vocab.pxd
index df0f001be..552cbffbe 100644
--- a/spacy/vocab.pxd
+++ b/spacy/vocab.pxd
@@ -5,7 +5,7 @@ from cymem.cymem cimport Pool
 from murmurhash.mrmr cimport hash64
 
 from .structs cimport LexemeC, TokenC, UniStr
-from .typedefs cimport utf8_t, id_t, hash_t
+from .typedefs cimport utf8_t, hash_t
 
 from .strings cimport StringStore
diff --git a/tests/vocab/test_iter_lexicon.py b/tests/vocab/test_iter_lexicon.py
deleted file mode 100644
index 5551d24c6..000000000
--- a/tests/vocab/test_iter_lexicon.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import pytest
-
-
-def test_range_iter(en_vocab):
-    for i in range(len(en_vocab)):
-        lex = en_vocab[i]
-
-
-def test_iter(en_vocab):
-    i = 0
-    for lex in en_vocab:
-        i += 1