2015-08-26 17:17:21 +00:00
|
|
|
from os import path
|
2015-08-27 07:16:11 +00:00
|
|
|
from .lemmatizer import Lemmatizer
|
2014-12-09 10:16:17 +00:00
|
|
|
|
2015-08-26 17:17:21 +00:00
|
|
|
try:
|
|
|
|
import ujson as json
|
|
|
|
except ImportError:
|
|
|
|
import json
|
2014-12-09 14:02:04 +00:00
|
|
|
|
2015-08-28 00:02:33 +00:00
|
|
|
from .parts_of_speech import UNIV_POS_NAMES
|
|
|
|
from .parts_of_speech cimport ADJ, VERB, NOUN
|
2015-08-27 21:11:51 +00:00
|
|
|
|
|
|
|
|
cdef class Morphology:
    """Store the morphological analyses for a vocabulary and apply them to
    tokens: assign a fine-grained tag, coarse POS, lemma and morph features.

    One ``RichTag`` row is kept per entry of the tag map; computed analyses
    are cached per ``(tag_id, orth)`` pair in ``self._cache`` so repeated
    assignments skip the lemmatizer.
    """
    @classmethod
    def from_dir(cls, data_dir, lemmatizer=None, string_store=None):
        """Load a Morphology from a data directory.

        data_dir: directory containing ``tag_map.json`` (and lemmatizer data).
        lemmatizer: optional Lemmatizer; loaded from ``data_dir`` when None.
        string_store: the StringStore tag names are interned into.
            NOTE(review): the previous code returned
            ``cls(tag_map, {}, lemmatizer)``, which does not match
            ``__init__(string_store, tag_map, lemmatizer)`` — the tag map was
            passed as the string store. Callers should supply the
            vocabulary's StringStore here; confirm against call sites.
        """
        # Use a context manager so the file handle is always closed.
        with open(path.join(data_dir, 'tag_map.json')) as file_:
            tag_map = json.load(file_)
        if lemmatizer is None:
            lemmatizer = Lemmatizer.from_dir(data_dir)
        return cls(string_store, tag_map, lemmatizer)

    def __init__(self, string_store, tag_map, lemmatizer):
        """Create the morphology table.

        string_store: StringStore used to intern tag names.
        tag_map: dict mapping fine-grained tag strings to property dicts.
        lemmatizer: callable ``(string, pos) -> set of lemma strings``,
            or None to disable lemmatization.
        """
        self.mem = Pool()
        self.strings = string_store
        self.lemmatizer = lemmatizer
        self.n_tags = len(tag_map)
        # Sort so tag ids are stable across runs for the same tag map.
        self.tag_names = tuple(sorted(tag_map.keys()))
        self.reverse_index = {}
        for i, (tag_str, props) in enumerate(sorted(tag_map.items())):
            self.rich_tags[i].id = i
            self.rich_tags[i].name = self.strings[tag_str]
            self.rich_tags[i].morph = 0
            # NOTE(review): ``props`` (e.g. a 'pos' entry) is not consumed
            # here and ``rich_tags[i].pos`` is never initialized in this
            # method — TODO confirm it is set elsewhere before use.
            # Map the interned tag name back to its row in rich_tags.
            self.reverse_index[self.rich_tags[i].name] = i
        self._cache = PreshMapArray(self.n_tags)

    cdef int assign_tag(self, TokenC* token, tag) except -1:
        """Set tag, pos, lemma and morph on ``token`` for the given tag.

        tag: a tag string, or an integer tag id (a row index into
            ``rich_tags``).
        """
        cdef int tag_id
        if isinstance(tag, basestring):
            # Map the tag string to its rich_tags row. The interned string
            # id returned by self.strings is NOT a valid row index (the old
            # code indexed rich_tags with it, reading out of bounds).
            tag_id = self.reverse_index[self.strings[tag]]
        else:
            tag_id = tag
        analysis = <MorphAnalysisC*>self._cache.get(tag_id, token.lex.orth)
        if analysis is NULL:
            analysis = <MorphAnalysisC*>self.mem.alloc(1, sizeof(MorphAnalysisC))
            analysis.tag = self.rich_tags[tag_id]
            # Lemmatize from the tag's coarse POS (the old code passed the
            # raw ``tag`` argument, which may be a string, where a
            # univ_pos_t is expected).
            analysis.lemma = self.lemmatize(analysis.tag.pos, token.lex.orth)
            # Store the analysis so the next (tag, orth) hit skips the
            # lemmatizer; the old code allocated but never cached.
            self._cache.set(tag_id, token.lex.orth, <void*>analysis)
        token.lemma = analysis.lemma
        token.pos = analysis.tag.pos
        token.tag = analysis.tag.name
        token.morph = analysis.tag.morph

    cdef int assign_feature(self, uint64_t* morph, feature, value) except -1:
        # TODO: set the bit for ``feature`` on *morph. Currently a no-op, so
        # non-lemma properties in morph exceptions are silently dropped.
        pass

    def load_morph_exceptions(self, dict exc):
        """Register special-case analyses, e.g. mapping ("VBZ", "'s") to the
        lemma "be".

        exc: dict mapping tag string -> {form string -> props}, where props
            may contain 'L' (the lemma string) plus morphological features.
        """
        # Map (form, pos) to (lemma, rich tag)
        cdef unicode tag_str
        cdef unicode form_str
        cdef dict entries
        cdef dict props
        cdef int tag_id
        cdef attr_t orth
        for tag_str, entries in exc.items():
            tag_id = self.reverse_index[self.strings[tag_str]]
            rich_tag = self.rich_tags[tag_id]
            for form_str, props in entries.items():
                cached = <MorphAnalysisC*>self.mem.alloc(1, sizeof(MorphAnalysisC))
                # Copy the tag struct in: the old code left cached.tag
                # zeroed, so a cache hit in assign_tag would write pos/tag/
                # morph of 0 onto the token.
                cached.tag = rich_tag
                orth = self.strings[form_str]
                for name_str, value_str in props.items():
                    if name_str == 'L':
                        cached.lemma = self.strings[value_str]
                    else:
                        self.assign_feature(&cached.tag.morph, name_str, value_str)
                if cached.lemma == 0:
                    cached.lemma = self.lemmatize(rich_tag.pos, orth)
                # Key by tag id to match the lookup in assign_tag (the old
                # code keyed by rich_tag.pos, so exceptions landed in the
                # wrong slot and were never found).
                self._cache.set(tag_id, orth, <void*>cached)

    def lemmatize(self, const univ_pos_t pos, attr_t orth):
        """Return the interned lemma id for the word ``orth`` with POS ``pos``.

        Falls back to ``orth`` itself when no lemmatizer is loaded, or when
        the POS is not NOUN, VERB or ADJ (the only classes with rules).
        """
        if self.lemmatizer is None:
            return orth
        cdef unicode py_string = self.strings[orth]
        if pos != NOUN and pos != VERB and pos != ADJ:
            return orth
        cdef set lemma_strings = self.lemmatizer(py_string, pos)
        # Several candidates may come back; pick the first alphabetically so
        # the choice is deterministic.
        cdef unicode lemma_string = sorted(lemma_strings)[0]
        return self.strings[lemma_string]