spaCy/spacy/pipeline/morphologizer.pyx

166 lines
6.6 KiB
Cython
Raw Normal View History

from __future__ import unicode_literals
from collections import OrderedDict, defaultdict
import numpy
cimport numpy as np
from thinc.api import chain
2018-09-25 20:12:54 +00:00
from thinc.neural.util import to_categorical, copy_array, get_array_module
2019-03-07 09:46:27 +00:00
from .. import util
from .pipes import Pipe
from ..language import component
2019-03-07 09:46:27 +00:00
from .._ml import Tok2Vec, build_morphologizer_model
from .._ml import link_vectors_to_models, zero_init, flatten
from .._ml import create_default_optimizer
from ..errors import Errors, TempErrors
from ..compat import basestring_
from ..tokens.doc cimport Doc
from ..vocab cimport Vocab
from ..morphology cimport Morphology
@component("morphologizer", assigns=["token.morph", "token.pos"])
class Morphologizer(Pipe):
    """Pipeline component that predicts morphological features (and a coarse
    POS tag via the morphological analysis) for each token, writing the
    results to ``token.morph`` and ``token.pos``.

    The underlying model scores every morphological field with its own
    softmax; the per-field distributions are laid out side by side in one
    score row per token (see ``scores_to_guesses`` and
    ``self.model.softmax.out_sizes``).
    """

    @classmethod
    def Model(cls, **cfg):
        """Build and return the thinc model for this component.

        Raises ValueError (TempErrors.T008) when ``pretrained_dims`` is set
        without ``pretrained_vectors`` — an invalid configuration.
        """
        if cfg.get('pretrained_dims') and not cfg.get('pretrained_vectors'):
            raise ValueError(TempErrors.T008)
        # The class map fixes the number of output neurons per field.
        class_map = Morphology.create_class_map()
        return build_morphologizer_model(class_map.field_sizes, **cfg)

    def __init__(self, vocab, model=True, **cfg):
        """Create the component.

        vocab: the shared ``Vocab``.
        model: a thinc model, or True/False/None to defer model creation.
        cfg: extra configuration, stored sorted for deterministic output.
        """
        self.vocab = vocab
        self.model = model
        self.cfg = OrderedDict(sorted(cfg.items()))
        self.cfg.setdefault('cnn_maxout_pieces', 2)
        # Maps between feature names, fields, and neuron/column offsets.
        self._class_map = self.vocab.morphology.create_class_map()

    @property
    def labels(self):
        # Tag names come from the shared morphology, not from this component.
        return self.vocab.morphology.tag_names

    @property
    def tok2vec(self):
        """Return the token-to-vector sublayer (flattened), or None if no
        concrete model has been created yet."""
        if self.model in (None, True, False):
            return None
        else:
            return chain(self.model.tok2vec, flatten)

    def __call__(self, doc):
        """Apply the component to a single Doc in place and return it."""
        features, tokvecs = self.predict([doc])
        self.set_annotations([doc], features, tensors=tokvecs)
        return doc

    def pipe(self, stream, batch_size=128, n_threads=-1):
        """Apply the component to a stream of Docs, processing them in
        minibatches of ``batch_size``. ``n_threads`` is accepted for
        API compatibility but unused here.
        """
        for docs in util.minibatch(stream, size=batch_size):
            docs = list(docs)
            features, tokvecs = self.predict(docs)
            self.set_annotations(docs, features, tensors=tokvecs)
            yield from docs

    def predict(self, docs):
        """Run the model on a batch of Docs.

        Returns (scores, tokvecs); scores hold one concatenated set of
        per-field softmax distributions per token.
        """
        if not any(len(doc) for doc in docs):
            # Handle case where there are no tokens in any docs.
            n_labels = self.model.nO
            guesses = [self.model.ops.allocate((0, n_labels)) for doc in docs]
            tokvecs = self.model.ops.allocate((0, self.model.tok2vec.nO))
            return guesses, tokvecs
        tokvecs = self.model.tok2vec(docs)
        scores = self.model.softmax(tokvecs)
        return scores, tokvecs

    def set_annotations(self, docs, batch_scores, tensors=None):
        """Write predicted analyses onto the Docs.

        batch_scores: one score matrix per doc, as returned by ``predict``.
        tensors: accepted for API compatibility; not used here.
        """
        if isinstance(docs, Doc):
            docs = [docs]
        cdef Doc doc
        cdef Vocab vocab = self.vocab
        # Column offset of each field's slice within a score row.
        offsets = [self._class_map.get_field_offset(field)
                   for field in self._class_map.fields]
        for i, doc in enumerate(docs):
            doc_scores = batch_scores[i]
            doc_guesses = scores_to_guesses(doc_scores, self.model.softmax.out_sizes)
            # Convert the neuron indices into feature IDs.
            doc_feat_ids = numpy.zeros((len(doc), len(self._class_map.fields)), dtype='i')
            for j in range(len(doc)):
                for k, offset in enumerate(offsets):
                    # Index 0 within a field means "no value" and maps to
                    # global column 0 rather than the field's own offset.
                    if doc_guesses[j, k] == 0:
                        doc_feat_ids[j, k] = 0
                    else:
                        doc_feat_ids[j, k] = offset + doc_guesses[j, k]
                # Get the set of feature names.
                feats = {self._class_map.col2info[f][2] for f in doc_feat_ids[j]}
                if "NIL" in feats:
                    feats.remove("NIL")
                # Now add the analysis, and set the hash.
                doc.c[j].morph = self.vocab.morphology.add(feats)
                # A POS value of 0 appears to mean "unset" — only overwrite
                # the token's POS when the analysis carries one.
                if doc[j].morph.pos != 0:
                    doc.c[j].pos = doc[j].morph.pos

    def update(self, docs, golds, drop=0., sgd=None, losses=None):
        """One training step: forward pass, loss, backprop.

        Accumulates this component's loss into ``losses`` when given.
        """
        if losses is not None and self.name not in losses:
            losses[self.name] = 0.
        tag_scores, bp_tag_scores = self.model.begin_update(docs, drop=drop)
        loss, d_tag_scores = self.get_loss(docs, golds, tag_scores)
        bp_tag_scores(d_tag_scores, sgd=sgd)
        if losses is not None:
            losses[self.name] += loss

    def get_loss(self, docs, golds, scores):
        """Compute a squared-error loss and the gradient of the scores.

        Builds a one-hot target per field for each token; tokens whose gold
        morphology is None get target == score, i.e. zero gradient.
        Returns (loss, d_scores) with d_scores unflattened back to per-doc
        arrays.
        """
        guesses = []
        for doc_scores in scores:
            guesses.append(scores_to_guesses(doc_scores, self.model.softmax.out_sizes))
        guesses = self.model.ops.xp.vstack(guesses)
        scores = self.model.ops.xp.vstack(scores)
        # Move to CPU (numpy) if the arrays live on GPU.
        if not isinstance(scores, numpy.ndarray):
            scores = scores.get()
        if not isinstance(guesses, numpy.ndarray):
            guesses = guesses.get()
        cdef int idx = 0
        # Do this on CPU, as we can't vectorize easily.
        target = numpy.zeros(scores.shape, dtype='f')
        field_sizes = self.model.softmax.out_sizes
        for doc, gold in zip(docs, golds):
            for t, features in enumerate(gold.morphology):
                if features is None:
                    # Missing gold annotation: make the gradient zero.
                    target[idx] = scores[idx]
                else:
                    # Map each gold feature to (field, within-field offset).
                    gold_fields = {}
                    for feature in features:
                        field = self._class_map.feat2field[feature]
                        gold_fields[field] = self._class_map.feat2offset[feature]
                    for field in self._class_map.fields:
                        field_id = self._class_map.field2id[field]
                        col_offset = self._class_map.field2col[field]
                        if field_id in gold_fields:
                            target[idx, col_offset + gold_fields[field_id]] = 1.
                        else:
                            # No gold value for this field: target the
                            # field's "none" column.
                            target[idx, col_offset] = 1.
                    #print(doc[t])
                    #for col, info in enumerate(self._class_map.col2info):
                    #    print(col, info, scores[idx, col], target[idx, col])
                idx += 1
        target = self.model.ops.asarray(target, dtype='f')
        scores = self.model.ops.asarray(scores, dtype='f')
        d_scores = scores - target
        loss = (d_scores**2).sum()
        d_scores = self.model.ops.unflatten(d_scores, [len(d) for d in docs])
        return float(loss), d_scores

    def use_params(self, params):
        """Context-manager generator: temporarily swap in ``params`` on the
        model for the duration of the ``with`` block."""
        with self.model.use_params(params):
            yield
2018-09-25 08:58:13 +00:00
def scores_to_guesses(scores, out_sizes):
    """Reduce concatenated per-field softmax scores to per-field argmaxes.

    scores: array of shape (n_tokens, sum(out_sizes)) — one softmax
        distribution per field, laid out side by side in each row.
    out_sizes: the number of output classes for each field, in order.
    Returns an int array of shape (n_tokens, len(out_sizes)) holding, for
    every token, the winning class index *within* each field's slice.
    """
    xp = get_array_module(scores)
    n_rows = scores.shape[0]
    guesses = xp.zeros((n_rows, len(out_sizes)), dtype='i')
    start = 0
    for field_idx, width in enumerate(out_sizes):
        end = start + width
        # Argmax over just this field's columns.
        guesses[:, field_idx] = scores[:, start:end].argmax(axis=1)
        start = end
    return guesses