# spaCy lemmatizer: WordNet-based lookup plus suffix-rewrite rules.
from __future__ import unicode_literals, print_function

import codecs
import pathlib

import ujson as json

from .symbols import POS, NOUN, VERB, ADJ, PUNCT


class Lemmatizer(object):
    """Map inflected English words to their base forms (lemmas).

    Lemmas come from three data sources: the WordNet index files (known
    base forms), the WordNet exception files (irregular inflections), and
    a table of suffix-rewrite rules, all keyed by coarse part-of-speech.
    """

    @classmethod
    def load(cls, path, rules=None):
        """Construct a Lemmatizer from a model data directory.

        path -- a pathlib.Path to the data directory; WordNet files are
            read from path/'wordnet', rules from path/'vocab'.
        rules -- optional pre-built rules mapping. When None, the rules
            are read from vocab/lemma_rules.json if present, otherwise
            an empty mapping is used.
        """
        index = {}
        exc = {}
        for pos in ('adj', 'noun', 'verb'):
            index_path = path / 'wordnet' / 'index.{pos}'.format(pos=pos)
            if index_path.exists():
                with index_path.open() as file_:
                    index[pos] = read_index(file_)
            else:
                index[pos] = set()
            exc_path = path / 'wordnet' / '{pos}.exc'.format(pos=pos)
            if exc_path.exists():
                with exc_path.open() as file_:
                    exc[pos] = read_exc(file_)
            else:
                exc[pos] = {}
        if rules is None:
            rules_path = path / 'vocab' / 'lemma_rules.json'
            if rules_path.exists():
                with rules_path.open('r', encoding='utf8') as file_:
                    rules = json.load(file_)
            else:
                rules = {}
        return cls(index, exc, rules)

    def __init__(self, index, exceptions, rules):
        # index: pos -> set of known base forms
        # exc:   pos -> {inflected form: tuple of lemmas}
        # rules: pos -> list of [old_suffix, new_suffix] rewrites
        self.index = index
        self.exc = exceptions
        self.rules = rules

    def __call__(self, string, univ_pos, morphology=None):
        """Return the set of candidate lemmas for *string*.

        univ_pos may be a POS symbol (NOUN/VERB/ADJ/PUNCT) or the
        equivalent lower-case string; symbols are normalised first.
        """
        names = {NOUN: 'noun', VERB: 'verb', ADJ: 'adj', PUNCT: 'punct'}
        univ_pos = names.get(univ_pos, univ_pos)
        # See Issue #435 for an example of where this logic is required.
        if self.is_base_form(univ_pos, morphology):
            return {string.lower()}
        return lemmatize(string,
                         self.index.get(univ_pos, {}),
                         self.exc.get(univ_pos, {}),
                         self.rules.get(univ_pos, []))

    def is_base_form(self, univ_pos, morphology=None):
        """Check whether we're dealing with an uninflected paradigm, so
        we can avoid lemmatization entirely."""
        if morphology is None:
            morphology = {}
        # Any morphological feature beyond these means the form is
        # inflected and must go through the full lemmatizer.
        extra = [feat for feat in morphology
                 if feat not in (POS, 'number', 'pos', 'verbform')]
        if extra:
            return False
        if univ_pos == 'noun' and morphology.get('number') == 'sing':
            return True
        if univ_pos == 'verb' and morphology.get('verbform') == 'inf':
            return True
        return False

    def noun(self, string, morphology=None):
        return self(string, 'noun', morphology)

    def verb(self, string, morphology=None):
        return self(string, 'verb', morphology)

    def adj(self, string, morphology=None):
        return self(string, 'adj', morphology)

    def punct(self, string, morphology=None):
        return self(string, 'punct', morphology)


def lemmatize(string, index, exceptions, rules):
    """Return the set of candidate lemmas for *string* (lower-cased).

    Candidates are the known irregular forms from *exceptions* plus every
    suffix-rule rewrite that lands on a known word in *index* (or on a
    non-alphabetic form). Falls back to the lower-cased input itself when
    nothing else matches.
    """
    string = string.lower()
    forms = list(exceptions.get(string, []))
    # TODO: Is this correct? See discussion in Issue #435.
    #if string in index:
    #    forms.append(string)
    for old, new in rules:
        if not string.endswith(old):
            continue
        candidate = string[:len(string) - len(old)] + new
        # Only keep rewrites that produce a known word; non-alphabetic
        # results (e.g. hyphenated or numeric forms) are kept as-is.
        if candidate in index or not candidate.isalpha():
            forms.append(candidate)
    return set(forms) if forms else {string}


def read_index(fileobj):
    """Read a WordNet index file into a set of single-word entries.

    Lines beginning with a space are the license header and are skipped;
    multi-word entries (joined with '_') are skipped too. Blank lines are
    ignored — previously they raised IndexError on ``pieces[0]``.
    """
    index = set()
    for line in fileobj:
        if line.startswith(' '):
            continue
        pieces = line.split()
        if not pieces:
            # Guard against blank lines in the data file.
            continue
        word = pieces[0]
        if word.count('_') == 0:
            index.add(word)
    return index


def read_exc(fileobj):
    """Read a WordNet exception file into a dict mapping each inflected
    form to the tuple of its base forms.

    Lines beginning with a space are the license header and are skipped.
    Blank lines are ignored — previously they raised IndexError on
    ``pieces[0]``.
    """
    exceptions = {}
    for line in fileobj:
        if line.startswith(' '):
            continue
        pieces = line.split()
        if not pieces:
            # Guard against blank lines in the data file.
            continue
        exceptions[pieces[0]] = tuple(pieces[1:])
    return exceptions