2015-07-23 11:24:20 +00:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2014-12-19 19:54:03 +00:00
|
|
|
from libc.stdio cimport fopen, fclose, fread, fwrite, FILE
|
2014-12-24 06:42:00 +00:00
|
|
|
from libc.string cimport memset
|
2015-07-27 08:58:15 +00:00
|
|
|
from libc.stdint cimport int32_t
|
2015-09-08 12:23:58 +00:00
|
|
|
from libc.stdint cimport uint64_t
|
2014-12-19 19:54:03 +00:00
|
|
|
|
2015-01-17 05:21:17 +00:00
|
|
|
import bz2
|
2014-12-19 19:54:03 +00:00
|
|
|
from os import path
|
2015-09-30 18:20:09 +00:00
|
|
|
import io
|
2015-02-07 13:44:42 +00:00
|
|
|
import math
|
2015-07-22 23:18:19 +00:00
|
|
|
import json
|
2015-10-12 05:41:31 +00:00
|
|
|
import tempfile
|
2014-12-19 19:54:03 +00:00
|
|
|
|
|
|
|
from .lexeme cimport EMPTY_LEXEME
|
2015-01-17 05:21:17 +00:00
|
|
|
from .lexeme cimport Lexeme
|
2015-01-11 23:26:22 +00:00
|
|
|
from .strings cimport hash_string
|
2015-01-05 16:18:22 +00:00
|
|
|
from .orth cimport word_shape
|
2015-01-31 05:38:58 +00:00
|
|
|
from .typedefs cimport attr_t
|
2015-07-22 23:18:19 +00:00
|
|
|
from .cfile cimport CFile
|
2015-08-28 00:02:33 +00:00
|
|
|
from .lemmatizer import Lemmatizer
|
2016-01-13 18:46:17 +00:00
|
|
|
from .util import get_package
|
2014-12-24 06:42:00 +00:00
|
|
|
|
2015-10-10 06:58:29 +00:00
|
|
|
from . import attrs
|
2015-10-10 11:12:06 +00:00
|
|
|
from . import symbols
|
2015-10-10 06:58:29 +00:00
|
|
|
|
2015-01-17 05:21:17 +00:00
|
|
|
from cymem.cymem cimport Address
|
2015-07-19 13:18:17 +00:00
|
|
|
from . import util
|
|
|
|
from .serialize.packer cimport Packer
|
2015-08-26 17:21:46 +00:00
|
|
|
from .attrs cimport PROB
|
2015-01-17 05:21:17 +00:00
|
|
|
|
2015-10-13 09:04:40 +00:00
|
|
|
try:
|
|
|
|
import copy_reg
|
|
|
|
except ImportError:
|
|
|
|
import copyreg as copy_reg
|
|
|
|
|
2014-12-24 06:42:00 +00:00
|
|
|
|
2015-01-11 23:26:22 +00:00
|
|
|
# Compile-time upper bound on word-vector length; used to sanity-check
# vector sizes read from binary files (see VectorReadError.bad_size).
DEF MAX_VEC_SIZE = 100000

# Shared, zero-filled vector that lexemes point at until real vectors
# are loaded (and as the fallback when no vector exists for a word).
cdef float[MAX_VEC_SIZE] EMPTY_VEC
memset(EMPTY_VEC, 0, sizeof(EMPTY_VEC))
# EMPTY_LEXEME is the zeroed sentinel struct returned for the empty
# string / orth 0 (see Vocab.get and Vocab.get_by_orth).
memset(&EMPTY_LEXEME, 0, sizeof(LexemeC))
EMPTY_LEXEME.vector = EMPTY_VEC
|
2015-01-11 23:26:22 +00:00
|
|
|
|
|
|
|
|
2014-12-19 19:54:03 +00:00
|
|
|
cdef class Vocab:
    '''A map container for a language's LexemeC structs.

    Lexemes are indexed twice over the same structs: by hashed string
    (``_by_hash``) and by orth ID (``_by_orth``). Entries allocated from the
    vocab's own memory pool live for the lifetime of the vocab.
    '''
|
2015-08-26 17:21:46 +00:00
|
|
|
@classmethod
|
2016-01-15 17:01:02 +00:00
|
|
|
def load(cls, via, get_lex_attr=None):
|
2016-01-16 09:00:57 +00:00
|
|
|
return cls.from_package(get_package(via), get_lex_attr=get_lex_attr)
|
|
|
|
|
|
|
|
    @classmethod
    def from_package(cls, package, get_lex_attr=None):
        """Construct a Vocab from an installed data package.

        Loads the tag map, lemmatizer and serializer frequencies, then the
        string table and lexemes, then (if present) the binary word vectors.
        """
        # Missing JSON resources fall back to empty dicts rather than raising.
        tag_map = package.load_json(('vocab', 'tag_map.json'), default={})

        lemmatizer = Lemmatizer.from_package(package)

        serializer_freqs = package.load_json(('vocab', 'serializer.json'), default={})

        cdef Vocab self = cls(get_lex_attr=get_lex_attr, tag_map=tag_map,
            lemmatizer=lemmatizer, serializer_freqs=serializer_freqs)

        # Strings must be loaded before lexemes, since lexemes reference
        # string IDs from the store.
        with package.open(('vocab', 'strings.json')) as file_:
            self.strings.load(file_)
        self.load_lexemes(package.file_path('vocab', 'lexemes.bin'))

        # Vectors are optional; vectors_length records the loaded width.
        if package.has_file('vocab', 'vec.bin'):
            self.vectors_length = self.load_vectors_from_bin_loc(
                package.file_path('vocab', 'vec.bin'))
        return self
|
2015-07-22 23:18:19 +00:00
|
|
|
|
2015-09-15 04:41:48 +00:00
|
|
|
    def __init__(self, get_lex_attr=None, tag_map=None, lemmatizer=None, serializer_freqs=None):
        """Create an empty Vocab.

        get_lex_attr: mapping from attribute IDs to callables that compute the
            attribute value from a unicode string (used in _new_lexeme).
        tag_map: mapping from tag names to morphology data; defaults to {}.
        lemmatizer: a Lemmatizer; defaults to an empty one.
        serializer_freqs: frequency data used to lazily build the Packer
            (see the `serializer` property).
        """
        if tag_map is None:
            tag_map = {}
        if lemmatizer is None:
            lemmatizer = Lemmatizer({}, {}, {})
        self.mem = Pool()
        # Two lookup tables over the same LexemeC structs.
        self._by_hash = PreshMap()
        self._by_orth = PreshMap()
        self.strings = StringStore()
        # Load strings in a special order, so that we have an onset number for
        # the vocabulary. This way, when words are added in order, the orth ID
        # is the frequency rank of the word, plus a certain offset. The structural
        # strings are loaded first, because the vocab is open-class, and these
        # symbols are closed class.
        for name in symbols.NAMES + list(sorted(tag_map.keys())):
            if name:
                _ = self.strings[name]
        self.get_lex_attr = get_lex_attr
        self.morphology = Morphology(self.strings, tag_map, lemmatizer)
        self.serializer_freqs = serializer_freqs

        # Starts at 1: _new_lexeme assigns lex.id = self.length, so ID 0 is
        # left free (it marks out-of-vocabulary lexemes).
        self.length = 1
        self._serializer = None
|
|
|
|
|
2015-07-22 23:18:19 +00:00
|
|
|
property serializer:
|
|
|
|
def __get__(self):
|
|
|
|
if self._serializer is None:
|
|
|
|
freqs = []
|
2015-09-10 13:22:48 +00:00
|
|
|
self._serializer = Packer(self, self.serializer_freqs)
|
2015-07-22 23:18:19 +00:00
|
|
|
return self._serializer
|
2015-07-19 13:18:17 +00:00
|
|
|
|
2014-12-19 19:54:03 +00:00
|
|
|
def __len__(self):
|
2014-12-27 07:45:16 +00:00
|
|
|
"""The current number of lexemes stored."""
|
2015-07-18 20:42:15 +00:00
|
|
|
return self.length
|
2014-12-19 19:54:03 +00:00
|
|
|
|
2015-10-12 05:41:31 +00:00
|
|
|
    def __reduce__(self):
        """Support pickling by dumping state to a fresh temp directory.

        Lexemes, strings and vectors are written to disk; the file paths plus
        in-memory state are handed to `unpickle_vocab` for reconstruction.
        Note: the temp directory is not cleaned up here — the unpickling side
        reads from it.
        """
        tmp_dir = tempfile.mkdtemp()
        lex_loc = path.join(tmp_dir, 'lexemes.bin')
        str_loc = path.join(tmp_dir, 'strings.json')
        vec_loc = path.join(tmp_dir, 'vec.bin')

        self.dump(lex_loc)
        with io.open(str_loc, 'w', encoding='utf8') as file_:
            self.strings.dump(file_)

        self.dump_vectors(vec_loc)

        # NOTE(review): self.data_dir is not assigned in __init__ within this
        # file — presumably declared in the .pxd; confirm it is always set
        # before a Vocab is pickled.
        state = (str_loc, lex_loc, vec_loc, self.morphology, self.get_lex_attr,
                 self.serializer_freqs, self.data_dir)
        return (unpickle_vocab, state, None, None)
|
2015-10-12 05:41:31 +00:00
|
|
|
|
2015-07-22 02:49:39 +00:00
|
|
|
    cdef const LexemeC* get(self, Pool mem, unicode string) except NULL:
        '''Get a pointer to a LexemeC from the lexicon, creating a new Lexeme
        if necessary, using memory acquired from the given pool. If the pool
        is the lexicon's own memory, the lexeme is saved in the lexicon.'''
        # The empty string maps to the shared zeroed sentinel.
        if string == u'':
            return &EMPTY_LEXEME
        cdef LexemeC* lex
        cdef hash_t key = hash_string(string)
        lex = <LexemeC*>self._by_hash.get(key)
        cdef size_t addr
        if lex != NULL:
            # Consistency check: the cached struct's orth must round-trip
            # through the string store back to the query string.
            if lex.orth != self.strings[string]:
                raise LookupError.mismatched_strings(
                    lex.orth, self.strings[string], self.strings[lex.orth], string)
            return lex
        else:
            return self._new_lexeme(mem, string)
|
2014-12-19 19:54:03 +00:00
|
|
|
|
2015-07-22 23:18:19 +00:00
|
|
|
    cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL:
        '''Get a pointer to a LexemeC from the lexicon, creating a new Lexeme
        if necessary, using memory acquired from the given pool. If the pool
        is the lexicon's own memory, the lexeme is saved in the lexicon.'''
        # Orth 0 is reserved for the empty-string sentinel.
        if orth == 0:
            return &EMPTY_LEXEME
        cdef LexemeC* lex
        lex = <LexemeC*>self._by_orth.get(orth)
        if lex != NULL:
            return lex
        else:
            # Unknown orth: resolve it back to its string and create the lexeme.
            return self._new_lexeme(mem, self.strings[orth])
|
|
|
|
|
|
|
|
    cdef const LexemeC* _new_lexeme(self, Pool mem, unicode string) except NULL:
        # Allocate and initialise a LexemeC for `string`. If `mem` is not the
        # vocab's own pool, the lexeme is treated as out-of-vocabulary: it
        # gets id 0 and is NOT added to the lookup tables.
        cdef hash_t key
        cdef bint is_oov = mem is not self.mem
        # Very short strings are always allocated from the vocab's own pool.
        # NOTE(review): is_oov was computed above, so a short OOV string is
        # allocated from self.mem yet still skipped from the tables — confirm
        # that asymmetry is intended.
        if len(string) < 3:
            mem = self.mem
        lex = <LexemeC*>mem.alloc(sizeof(LexemeC), 1)
        lex.orth = self.strings[string]
        lex.length = len(string)
        lex.id = self.length
        lex.vector = <float*>mem.alloc(self.vectors_length, sizeof(float))
        if self.get_lex_attr is not None:
            for attr, func in self.get_lex_attr.items():
                value = func(string)
                # String-valued attributes are interned to their string IDs.
                if isinstance(value, unicode):
                    value = self.strings[value]
                if attr == PROB:
                    # prob is a float field; set it directly rather than via
                    # the integer struct-attribute setter.
                    lex.prob = value
                else:
                    Lexeme.set_struct_attr(lex, attr, value)
        if is_oov:
            # OOV lexemes carry no stable ID.
            lex.id = 0
        else:
            key = hash_string(string)
            self._add_lex_to_vocab(key, lex)
        assert lex != NULL, string
        return lex
|
|
|
|
|
2015-01-13 13:03:48 +00:00
|
|
|
    cdef int _add_lex_to_vocab(self, hash_t key, const LexemeC* lex) except -1:
        # Register the lexeme under both its string hash and its orth ID,
        # and bump the vocab size.
        self._by_hash.set(key, <void*>lex)
        self._by_orth.set(lex.orth, <void*>lex)
        self.length += 1
|
|
|
|
|
|
|
|
    def __iter__(self):
        """Iterate over the stored entries, yielding Lexeme objects."""
        cdef attr_t orth
        cdef size_t addr
        for orth, addr in self._by_orth.items():
            yield Lexeme(self, orth)
|
2015-01-13 13:03:48 +00:00
|
|
|
|
2014-12-19 19:54:03 +00:00
|
|
|
    def __getitem__(self, id_or_string):
        '''Retrieve a lexeme, given an int ID or a unicode string. If a previously
        unseen unicode string is given, a new lexeme is created and stored.

        Args:
            id_or_string (int or unicode):
                The integer ID of a word, or its unicode string. If an int >= Lexicon.size,
                IndexError is raised. If id_or_string is neither an int nor a unicode string,
                ValueError is raised.

        Returns:
            lexeme (Lexeme):
                An instance of the Lexeme Python class, with data copied on
                instantiation.
        '''
        cdef attr_t orth
        # Exact type check (not isinstance): only true unicode objects are
        # interned; anything else is passed through as an orth ID.
        if type(id_or_string) == unicode:
            orth = self.strings[id_or_string]
        else:
            orth = id_or_string
        return Lexeme(self, orth)
|
2014-12-19 19:54:03 +00:00
|
|
|
|
2015-08-28 00:02:33 +00:00
|
|
|
    cdef const TokenC* make_fused_token(self, substrings) except NULL:
        # Build an array of TokenC structs from a list of property dicts.
        # Each props dict must contain 'F' (the surface form); 'pos', 'L'
        # (lemma) and 'morph' are optional.
        cdef int i
        # +1 leaves a zeroed terminal entry at the end of the array.
        tokens = <TokenC*>self.mem.alloc(len(substrings) + 1, sizeof(TokenC))
        for i, props in enumerate(substrings):
            token = &tokens[i]
            # Set the special tokens up to have morphology and lemmas if
            # specified, otherwise use the part-of-speech tag (if specified)
            token.lex = <LexemeC*>self.get(self.mem, props['F'])
            if 'pos' in props:
                self.morphology.assign_tag(token, props['pos'])
            if 'L' in props:
                tokens[i].lemma = self.strings[props['L']]
            for feature, value in props.get('morph', {}).items():
                self.morphology.assign_feature(&token.morph, feature, value)
        return tokens
|
|
|
|
|
2014-12-19 19:54:03 +00:00
|
|
|
    def dump(self, loc):
        """Write the lexemes to a binary file at `loc`.

        Record layout per lexeme: orth written once as a leading key, then the
        struct fields (flags, id, length, orth, lower, norm, shape, prefix,
        suffix, cluster, prob, sentiment, l2_norm) — matching the read order
        in `load_lexemes`.
        """
        if path.exists(loc):
            assert not path.isdir(loc)
        cdef bytes bytes_loc = loc.encode('utf8') if type(loc) == unicode else loc

        cdef CFile fp = CFile(bytes_loc, 'wb')
        cdef size_t st
        cdef size_t addr
        cdef hash_t key
        for key, addr in self._by_hash.items():
            lexeme = <LexemeC*>addr
            # Leading orth acts as the record key / EOF probe for the reader...
            fp.write_from(&lexeme.orth, sizeof(lexeme.orth), 1)
            fp.write_from(&lexeme.flags, sizeof(lexeme.flags), 1)
            fp.write_from(&lexeme.id, sizeof(lexeme.id), 1)
            fp.write_from(&lexeme.length, sizeof(lexeme.length), 1)
            # ...and is written a second time as part of the struct body.
            fp.write_from(&lexeme.orth, sizeof(lexeme.orth), 1)
            fp.write_from(&lexeme.lower, sizeof(lexeme.lower), 1)
            fp.write_from(&lexeme.norm, sizeof(lexeme.norm), 1)
            fp.write_from(&lexeme.shape, sizeof(lexeme.shape), 1)
            fp.write_from(&lexeme.prefix, sizeof(lexeme.prefix), 1)
            fp.write_from(&lexeme.suffix, sizeof(lexeme.suffix), 1)
            fp.write_from(&lexeme.cluster, sizeof(lexeme.cluster), 1)
            fp.write_from(&lexeme.prob, sizeof(lexeme.prob), 1)
            fp.write_from(&lexeme.sentiment, sizeof(lexeme.sentiment), 1)
            fp.write_from(&lexeme.l2_norm, sizeof(lexeme.l2_norm), 1)
        fp.close()
|
2014-12-19 19:54:03 +00:00
|
|
|
|
2015-10-22 10:13:03 +00:00
|
|
|
    def load_lexemes(self, loc):
        """Read lexemes from a binary file written by `dump`.

        Raises IOError if the file does not exist. Records are read until
        EOF; loaded lexemes point at the shared EMPTY_VEC until vectors are
        loaded separately.
        """
        if not path.exists(loc):
            raise IOError('LexemeCs file not found at %s' % loc)
        fp = CFile(loc, 'rb')
        cdef LexemeC* lexeme
        cdef hash_t key
        cdef unicode py_str
        cdef attr_t orth
        assert sizeof(orth) == sizeof(lexeme.orth)
        i = 0
        while True:
            # The leading orth key doubles as the EOF probe: CFile raises
            # IOError when the read fails at end of file.
            try:
                fp.read_into(&orth, 1, sizeof(orth))
            except IOError:
                break
            lexeme = <LexemeC*>self.mem.alloc(sizeof(LexemeC), 1)
            # Copy data from the file into the lexeme
            fp.read_into(&lexeme.flags, 1, sizeof(lexeme.flags))
            fp.read_into(&lexeme.id, 1, sizeof(lexeme.id))
            fp.read_into(&lexeme.length, 1, sizeof(lexeme.length))
            fp.read_into(&lexeme.orth, 1, sizeof(lexeme.orth))
            fp.read_into(&lexeme.lower, 1, sizeof(lexeme.lower))
            fp.read_into(&lexeme.norm, 1, sizeof(lexeme.norm))
            fp.read_into(&lexeme.shape, 1, sizeof(lexeme.shape))
            fp.read_into(&lexeme.prefix, 1, sizeof(lexeme.prefix))
            fp.read_into(&lexeme.suffix, 1, sizeof(lexeme.suffix))
            fp.read_into(&lexeme.cluster, 1, sizeof(lexeme.cluster))
            fp.read_into(&lexeme.prob, 1, sizeof(lexeme.prob))
            fp.read_into(&lexeme.sentiment, 1, sizeof(lexeme.sentiment))
            fp.read_into(&lexeme.l2_norm, 1, sizeof(lexeme.l2_norm))

            lexeme.vector = EMPTY_VEC
            # The hash key is re-derived from the orth string; it is not
            # stored in the file.
            py_str = self.strings[lexeme.orth]
            key = hash_string(py_str)
            self._by_hash.set(key, lexeme)
            self._by_orth.set(lexeme.orth, lexeme)
            self.length += 1
            i += 1
        fp.close()
|
2015-04-19 08:31:31 +00:00
|
|
|
|
2015-10-26 01:33:04 +00:00
|
|
|
    def dump_vectors(self, out_loc):
        """Write word vectors to a binary file at `out_loc`.

        Record layout: int32 word byte-length, int32 vector length, the UTF-8
        word bytes, then the float vector values — the format read back by
        `load_vectors_from_bin_loc`.
        """
        cdef int32_t vec_len = self.vectors_length
        cdef int32_t word_len
        cdef bytes word_str
        cdef char* chars

        cdef Lexeme lexeme
        cdef CFile out_file = CFile(out_loc, 'wb')
        for lexeme in self:
            word_str = lexeme.orth_.encode('utf8')
            vec = lexeme.c.vector
            word_len = len(word_str)

            out_file.write_from(&word_len, 1, sizeof(word_len))
            out_file.write_from(&vec_len, 1, sizeof(vec_len))

            chars = <char*>word_str
            out_file.write_from(chars, word_len, sizeof(char))
            out_file.write_from(vec, vec_len, sizeof(float))
        out_file.close()
|
|
|
|
|
2015-10-11 13:51:43 +00:00
|
|
|
    def load_vectors(self, file_):
        """Load word vectors from a whitespace-separated text file object.

        Each line is a word followed by its float components; all lines must
        have the same number of components. Returns the vector length (-1 if
        the file is empty).
        """
        cdef LexemeC* lexeme
        cdef attr_t orth
        cdef int32_t vec_len = -1
        for line_num, line in enumerate(file_):
            pieces = line.split()
            word_str = pieces.pop(0)
            # The first line fixes the expected vector width.
            if vec_len == -1:
                vec_len = len(pieces)
            elif vec_len != len(pieces):
                raise VectorReadError.mismatched_sizes(file_, line_num,
                    vec_len, len(pieces))
            orth = self.strings[word_str]
            lexeme = <LexemeC*><void*>self.get_by_orth(self.mem, orth)
            # NOTE(review): the allocation uses self.vectors_length, but the
            # loop below writes vec_len values — confirm vectors_length has
            # been set >= vec_len before calling, otherwise writes past the
            # allocation.
            lexeme.vector = <float*>self.mem.alloc(self.vectors_length, sizeof(float))

            for i, val_str in enumerate(pieces):
                lexeme.vector[i] = float(val_str)
        return vec_len
|
2015-09-17 02:58:23 +00:00
|
|
|
|
2015-09-23 13:51:08 +00:00
|
|
|
def load_vectors_from_bin_loc(self, loc):
|
2015-07-22 23:18:19 +00:00
|
|
|
cdef CFile file_ = CFile(loc, b'rb')
|
2015-01-17 05:21:17 +00:00
|
|
|
cdef int32_t word_len
|
2015-09-21 08:08:32 +00:00
|
|
|
cdef int32_t vec_len = 0
|
2015-06-05 14:26:39 +00:00
|
|
|
cdef int32_t prev_vec_len = 0
|
2015-01-17 05:21:17 +00:00
|
|
|
cdef float* vec
|
|
|
|
cdef Address mem
|
2015-07-18 20:42:15 +00:00
|
|
|
cdef attr_t string_id
|
2015-01-17 05:21:17 +00:00
|
|
|
cdef bytes py_word
|
|
|
|
cdef vector[float*] vectors
|
2015-09-21 08:08:32 +00:00
|
|
|
cdef int line_num = 0
|
2015-07-22 23:18:19 +00:00
|
|
|
cdef Pool tmp_mem = Pool()
|
2015-01-17 05:21:17 +00:00
|
|
|
while True:
|
|
|
|
try:
|
2015-07-22 23:18:19 +00:00
|
|
|
file_.read_into(&word_len, sizeof(word_len), 1)
|
2015-01-17 05:21:17 +00:00
|
|
|
except IOError:
|
|
|
|
break
|
2015-07-22 23:18:19 +00:00
|
|
|
file_.read_into(&vec_len, sizeof(vec_len), 1)
|
2015-06-05 14:26:39 +00:00
|
|
|
if prev_vec_len != 0 and vec_len != prev_vec_len:
|
2015-09-21 08:08:32 +00:00
|
|
|
raise VectorReadError.mismatched_sizes(loc, line_num,
|
|
|
|
vec_len, prev_vec_len)
|
2015-06-05 14:26:39 +00:00
|
|
|
if 0 >= vec_len >= MAX_VEC_SIZE:
|
|
|
|
raise VectorReadError.bad_size(loc, vec_len)
|
2015-01-17 05:21:17 +00:00
|
|
|
|
2015-07-22 23:18:19 +00:00
|
|
|
chars = <char*>file_.alloc_read(tmp_mem, word_len, sizeof(char))
|
|
|
|
vec = <float*>file_.alloc_read(self.mem, vec_len, sizeof(float))
|
2015-01-17 05:21:17 +00:00
|
|
|
|
|
|
|
string_id = self.strings[chars[:word_len]]
|
|
|
|
while string_id >= vectors.size():
|
|
|
|
vectors.push_back(EMPTY_VEC)
|
|
|
|
assert vec != NULL
|
|
|
|
vectors[string_id] = vec
|
2015-09-21 08:08:32 +00:00
|
|
|
line_num += 1
|
2015-01-11 23:26:22 +00:00
|
|
|
cdef LexemeC* lex
|
2015-07-18 20:42:15 +00:00
|
|
|
cdef size_t lex_addr
|
2015-09-21 08:08:32 +00:00
|
|
|
cdef int i
|
2015-07-18 20:42:15 +00:00
|
|
|
for orth, lex_addr in self._by_orth.items():
|
|
|
|
lex = <LexemeC*>lex_addr
|
2015-01-23 19:17:03 +00:00
|
|
|
if lex.lower < vectors.size():
|
2015-11-03 12:47:59 +00:00
|
|
|
lex.vector = vectors[lex.lower]
|
2015-02-07 13:44:42 +00:00
|
|
|
for i in range(vec_len):
|
2015-11-03 12:47:59 +00:00
|
|
|
lex.l2_norm += (lex.vector[i] * lex.vector[i])
|
2015-02-07 13:44:42 +00:00
|
|
|
lex.l2_norm = math.sqrt(lex.l2_norm)
|
2015-01-17 05:21:17 +00:00
|
|
|
else:
|
2015-11-03 12:47:59 +00:00
|
|
|
lex.vector = EMPTY_VEC
|
2015-06-05 14:26:39 +00:00
|
|
|
return vec_len
|
2015-01-17 05:21:17 +00:00
|
|
|
|
|
|
|
|
2015-10-12 08:33:11 +00:00
|
|
|
def unpickle_vocab(strings_loc, lex_loc, vec_loc, morphology, get_lex_attr,
                   serializer_freqs, data_dir):
    """Rebuild a Vocab from the state tuple produced by Vocab.__reduce__.

    File-backed state (strings, lexemes, optional vectors) is reloaded from
    the paths written at pickle time; the rest is assigned directly.
    """
    cdef Vocab vocab = Vocab()

    vocab.get_lex_attr = get_lex_attr
    vocab.morphology = morphology
    # Share the morphology's string table so string IDs stay consistent.
    vocab.strings = morphology.strings
    vocab.data_dir = data_dir
    vocab.serializer_freqs = serializer_freqs

    with io.open(strings_loc, 'r', encoding='utf8') as file_:
        vocab.strings.load(file_)
    vocab.load_lexemes(lex_loc)
    # vec_loc is None when the pickled vocab had no vectors.
    if vec_loc is not None:
        vocab.vectors_length = vocab.load_vectors_from_bin_loc(vec_loc)
    return vocab
|
|
|
|
|
2015-10-12 06:00:01 +00:00
|
|
|
|
|
|
|
# Register the reconstructor with copy_reg/copyreg so Vocab unpickles cleanly.
copy_reg.constructor(unpickle_vocab)
|
2015-10-12 05:41:31 +00:00
|
|
|
|
|
|
|
|
2015-01-17 05:21:17 +00:00
|
|
|
def write_binary_vectors(in_loc, out_loc):
    """Convert a bz2-compressed text vectors file to the binary format read
    by Vocab.load_vectors_from_bin_loc.

    Each input line is a word followed by its float components; the output
    record layout matches Vocab.dump_vectors.
    """
    cdef CFile out_file = CFile(out_loc, 'wb')
    cdef Address mem
    cdef int32_t word_len
    cdef int32_t vec_len
    cdef char* chars
    with bz2.BZ2File(in_loc, 'r') as file_:
        for line in file_:
            pieces = line.split()
            word = pieces.pop(0)
            # Scratch buffer for this line's parsed vector values.
            mem = Address(len(pieces), sizeof(float))
            vec = <float*>mem.ptr
            for i, val_str in enumerate(pieces):
                vec[i] = float(val_str)

            word_len = len(word)
            vec_len = len(pieces)

            out_file.write_from(&word_len, 1, sizeof(word_len))
            out_file.write_from(&vec_len, 1, sizeof(vec_len))

            chars = <char*>word
            out_file.write_from(chars, len(word), sizeof(char))
            out_file.write_from(vec, vec_len, sizeof(float))
|
2015-06-05 14:26:39 +00:00
|
|
|
|
|
|
|
|
2015-10-05 23:34:59 +00:00
|
|
|
class LookupError(Exception):
    """Raised when a cached LexemeC's orth does not round-trip through the
    string store back to the query string.

    Note: intentionally shadows the builtin LookupError within this module.
    """
    @classmethod
    def mismatched_strings(cls, id_, id_string, orth_string, original_string):
        """Build the error for a hash-table / string-store mismatch.

        id_: the orth ID cached on the lexeme.
        id_string: the ID the string store returned for the query string.
        orth_string: the string the store returned for the cached orth ID.
        original_string: the query string itself.

        BUG FIX: the caller in Vocab.get passes four arguments
        (lex.orth, self.strings[string], self.strings[lex.orth], string),
        but this factory previously accepted only three — so constructing
        the error raised a TypeError instead.
        """
        return cls(
            "Error fetching a Lexeme from the Vocab. When looking up a string, "
            "the lexeme returned had an orth ID that did not match the query string. "
            "This means that the cached lexeme structs are mismatched to the "
            "string encoding table. The mismatched:\n"
            "Query string: {query}\n"
            "Orth cached: {orth_str}\n"
            "ID of orth: {orth_id}".format(
                query=repr(original_string), orth_str=repr(orth_string), orth_id=id_)
        )
|
|
|
|
|
|
|
|
|
2015-06-05 14:26:39 +00:00
|
|
|
class VectorReadError(Exception):
    """Raised when a word-vectors file is malformed."""

    @classmethod
    def mismatched_sizes(cls, loc, line_num, prev_size, curr_size):
        """Error for a vector whose length differs from earlier lines."""
        message = (
            "Error reading word vectors from %s on line %d.\n"
            "All vectors must be the same size.\n"
            "Prev size: %d\n"
            "Curr size: %d" % (loc, line_num, prev_size, curr_size))
        return cls(message)

    @classmethod
    def bad_size(cls, loc, size):
        """Error for a vector length outside the valid (0, MAX_VEC_SIZE) range."""
        message = (
            "Error reading word vectors from %s.\n"
            "Vector size: %d\n"
            "Max size: %d\n"
            "Min size: 1\n" % (loc, size, MAX_VEC_SIZE))
        return cls(message)
|