# cython: infer_types=True
# cython: cdivision=True
# cython: boundscheck=False
# cython: profile=True
# coding: utf-8
from __future__ import unicode_literals, print_function

from collections import OrderedDict
import ujson
import json
import numpy
cimport cython.parallel
import cytoolz
import numpy.random
cimport numpy as np
from cpython.ref cimport PyObject, Py_XDECREF
from cpython.exc cimport PyErr_CheckSignals, PyErr_SetFromErrno
from libc.math cimport exp
from libcpp.vector cimport vector
from libc.string cimport memset, memcpy
from libc.stdlib cimport calloc, free
from cymem.cymem cimport Pool
from thinc.typedefs cimport weight_t, class_t, hash_t
from thinc.extra.search cimport Beam
from thinc.api import chain, clone
from thinc.v2v import Model, Maxout, Affine
from thinc.misc import LayerNorm
from thinc.neural.ops import CupyOps
from thinc.neural.util import get_array_module
from thinc.linalg cimport Vec, VecVec

from .._ml import zero_init, PrecomputableAffine, Tok2Vec, flatten
from .._ml import link_vectors_to_models, create_default_optimizer
from ..compat import json_dumps, copy_array
from ..tokens.doc cimport Doc
from ..gold cimport GoldParse
from .. import util
from .stateclass cimport StateClass
from ._state cimport StateC
from .transition_system cimport Transition
from . import _beam_utils, nonproj


def get_templates(*args, **kwargs):
    return []


DEBUG = False


def set_debug(val):
    global DEBUG
    DEBUG = val


cdef class precompute_hiddens:
    """Allow a model to be "primed" by pre-computing input features in bulk.

    This is used for the parser, where we want to take a batch of documents,
    and compute vectors for each (token, position) pair. These vectors can then
    be reused, especially for beam-search.

    Let's say we're using 12 features for each state, e.g. the word at the
    start of the buffer, three words on the stack, their children, etc. In the
    normal arc-eager system, a document of length N is processed in 2*N states.
    This means we'll create 2*N*12 feature vectors --- but if we pre-compute,
    we only need N*12 vector computations. The savings for beam-search are
    much better: with a beam of width k, we'd normally make 2*N*12*k
    computations, so pre-computing saves the factor k. This also gives a nice
    CPU/GPU division: we can do all our hard maths up front, packed into large
    multiplications, and do the hard-to-program parsing on the CPU.
    """
    cdef int nF, nO, nP
    cdef bint _is_synchronized
    cdef public object ops
    cdef np.ndarray _features
    cdef np.ndarray _cached
    cdef np.ndarray bias
    cdef object _cuda_stream
    cdef object _bp_hiddens

    def __init__(self, batch_size, tokvecs, lower_model, cuda_stream=None,
                 drop=0.):
        gpu_cached, bp_features = lower_model.begin_update(tokvecs, drop=drop)
        cdef np.ndarray cached
        if not isinstance(gpu_cached, numpy.ndarray):
            # Note the passing of cuda_stream here: it lets
            # cupy make the copy asynchronously.
            # We then have to block before first use.
            cached = gpu_cached.get(stream=cuda_stream)
        else:
            cached = gpu_cached
        if not isinstance(lower_model.b, numpy.ndarray):
            self.bias = lower_model.b.get()
        else:
            self.bias = lower_model.b
        self.nF = cached.shape[1]
        self.nP = getattr(lower_model, 'nP', 1)
        self.nO = cached.shape[2]
        self.ops = lower_model.ops
        self._is_synchronized = False
        self._cuda_stream = cuda_stream
        self._cached = cached
        self._bp_hiddens = bp_features

    cdef const float* get_feat_weights(self) except NULL:
        if not self._is_synchronized and self._cuda_stream is not None:
            self._cuda_stream.synchronize()
            self._is_synchronized = True
        return <float*>self._cached.data

    def __call__(self, X):
        return self.begin_update(X)[0]

    def begin_update(self, token_ids, drop=0.):
        cdef np.ndarray state_vector = numpy.zeros(
            (token_ids.shape[0], self.nO, self.nP), dtype='f')
        # This is tricky, but (assuming a GPU is available):
        # - Input to forward on CPU
        # - Output from forward on CPU
        # - Input to backward on GPU!
        # - Output from backward on GPU
        bp_hiddens = self._bp_hiddens

        feat_weights = self.get_feat_weights()
        cdef int[:, ::1] ids = token_ids
        sum_state_features(<float*>state_vector.data,
            feat_weights, &ids[0,0],
            token_ids.shape[0], self.nF, self.nO*self.nP)
        state_vector += self.bias
        state_vector, bp_nonlinearity = self._nonlinearity(state_vector)

        def backward(d_state_vector_ids, sgd=None):
            d_state_vector, token_ids = d_state_vector_ids
            d_state_vector = bp_nonlinearity(d_state_vector, sgd)
            # This will usually be on GPU
            if not isinstance(d_state_vector, self.ops.xp.ndarray):
                d_state_vector = self.ops.xp.array(d_state_vector)
            d_tokens = bp_hiddens((d_state_vector, token_ids), sgd)
            return d_tokens
        return state_vector, backward

    def _nonlinearity(self, state_vector):
        if self.nP == 1:
            state_vector = state_vector.reshape(state_vector.shape[:-1])
            mask = state_vector >= 0.
            state_vector *= mask
        else:
            state_vector, mask = self.ops.maxout(state_vector)
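        # The two branches above (a summary, not extra behaviour): with
        # nP == 1 this is a plain ReLU, with `mask` recording which units
        # were positive; with nP > 1 it's a maxout unit, e.g. for nP=2 each
        # output is max(piece_0, piece_1), and `mask` records the winning
        # piece so backprop_nonlinearity can route the gradient to it.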

        def backprop_nonlinearity(d_best, sgd=None):
            if self.nP == 1:
                d_best *= mask
                d_best = d_best.reshape((d_best.shape + (1,)))
                return d_best
            else:
                return self.ops.backprop_maxout(d_best, mask, self.nP)
        return state_vector, backprop_nonlinearity


cdef void sum_state_features(float* output,
        const float* cached, const int* token_ids, int B, int F, int O) nogil:
    cdef int idx, b, f, i
    cdef const float* feature
    # The first F*O block of `cached` is a padding row: features for
    # missing tokens (token_ids[f] < 0) are read from there. After that,
    # the contribution of feature slot f for token t starts at t*F*O + f*O.
    padding = cached
    cached += F * O
    for b in range(B):
        for f in range(F):
            if token_ids[f] < 0:
                feature = &padding[f*O]
            else:
                idx = token_ids[f] * F * O + f*O
                feature = &cached[idx]
            for i in range(O):
                output[i] += feature[i]
        output += O
        token_ids += F


cdef void cpu_log_loss(float* d_scores,
        const float* costs, const int* is_valid, const float* scores,
        int O) nogil:
    """Do multi-label log loss"""
    cdef double max_, gmax, Z, gZ
    best = arg_max_if_gold(scores, costs, is_valid, O)
    guess = arg_max_if_valid(scores, is_valid, O)
    Z = 1e-10
    gZ = 1e-10
    max_ = scores[guess]
    gmax = scores[best]
    for i in range(O):
        if is_valid[i]:
            Z += exp(scores[i] - max_)
            if costs[i] <= costs[best]:
                gZ += exp(scores[i] - gmax)
    for i in range(O):
        if not is_valid[i]:
            d_scores[i] = 0.
        elif costs[i] <= costs[best]:
            d_scores[i] = (exp(scores[i]-max_) / Z) - (exp(scores[i]-gmax)/gZ)
        else:
            d_scores[i] = exp(scores[i]-max_) / Z


cdef void cpu_regression_loss(float* d_scores,
        const float* costs, const int* is_valid, const float* scores,
        int O) nogil:
    cdef float eps = 2.
    best = arg_max_if_gold(scores, costs, is_valid, O)
    for i in range(O):
        if not is_valid[i]:
            d_scores[i] = 0.
        elif scores[i] < scores[best]:
            d_scores[i] = 0.
        else:
            # I doubt this is correct?
            # Looking for something like Huber loss
            diff = scores[i] - -costs[i]
            if diff > eps:
                d_scores[i] = eps
            elif diff < -eps:
                d_scores[i] = -eps
            else:
                d_scores[i] = diff


def _collect_states(beams):
    cdef StateClass state
    cdef Beam beam
    states = []
    for beam in beams:
        state = StateClass.borrow(<StateC*>beam.at(0))
        states.append(state)
    return states


cdef class Parser:
    """
    Base class of the DependencyParser and EntityRecognizer.
    """

    @classmethod
    def Model(cls, nr_class, **cfg):
        depth = util.env_opt('parser_hidden_depth', cfg.get('hidden_depth', 1))
        if depth != 1:
            raise ValueError("Currently parser depth is hard-coded to 1.")
        parser_maxout_pieces = util.env_opt('parser_maxout_pieces',
                                            cfg.get('maxout_pieces', 2))
        token_vector_width = util.env_opt('token_vector_width',
                                          cfg.get('token_vector_width', 128))
        hidden_width = util.env_opt('hidden_width', cfg.get('hidden_width', 200))
        embed_size = util.env_opt('embed_size', cfg.get('embed_size', 7000))
        hist_size = util.env_opt('history_feats', cfg.get('hist_size', 0))
        hist_width = util.env_opt('history_width', cfg.get('hist_width', 0))
        if hist_size != 0:
            raise ValueError("Currently history size is hard-coded to 0.")
        if hist_width != 0:
            raise ValueError("Currently history width is hard-coded to 0.")
        tok2vec = Tok2Vec(token_vector_width, embed_size,
                          pretrained_dims=cfg.get('pretrained_dims', 0))
        tok2vec = chain(tok2vec, flatten)
        lower = PrecomputableAffine(hidden_width,
                                    nF=cls.nr_feature, nI=token_vector_width,
                                    nP=parser_maxout_pieces)
        lower.nP = parser_maxout_pieces

        with Model.use_device('cpu'):
            upper = chain(
                clone(LayerNorm(Maxout(hidden_width, hidden_width)), depth-1),
                zero_init(Affine(nr_class, hidden_width, drop_factor=0.0))
            )

        cfg = {
            'nr_class': nr_class,
            'hidden_depth': depth,
            'token_vector_width': token_vector_width,
            'hidden_width': hidden_width,
            'maxout_pieces': parser_maxout_pieces,
            'hist_size': hist_size,
            'hist_width': hist_width
        }
        return (tok2vec, lower, upper), cfg
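
    # Sketch of the data flow through the three parts returned above:
    # `tok2vec` embeds and encodes each document into token vectors;
    # `lower` is the precomputable layer that turns those vectors into
    # per-(token, feature-slot) hidden contributions; `upper` maps the
    # summed, maxout-ed hidden vector of each state to one score per
    # transition class.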

    def create_optimizer(self):
        return create_default_optimizer(self.model[0].ops,
                                        **self.cfg.get('optimizer', {}))

    def __init__(self, Vocab vocab, moves=True, model=True, **cfg):
        """Create a Parser.

        vocab (Vocab): The vocabulary object. Must be shared with documents
            to be processed. The value is set to the `.vocab` attribute.
        moves (TransitionSystem): Defines how the parse-state is created,
            updated and evaluated. The value is set to the `.moves` attribute
            unless True (default), in which case a new instance is created
            with `Parser.TransitionSystem()`.
        model (object): The statistical model, which scores the available
            transitions. The value is set to the `.model` attribute unless
            True (default), in which case a new instance is created with
            `Parser.Model()`.
        **cfg: Arbitrary configuration parameters. Set to the `.cfg` attribute.
        """
        self.vocab = vocab
        if moves is True:
            self.moves = self.TransitionSystem(self.vocab.strings, {})
        else:
            self.moves = moves
        if 'beam_width' not in cfg:
            cfg['beam_width'] = util.env_opt('beam_width', 1)
        if 'beam_density' not in cfg:
            cfg['beam_density'] = util.env_opt('beam_density', 0.0)
        if 'pretrained_dims' not in cfg:
            cfg['pretrained_dims'] = self.vocab.vectors.data.shape[1]
        cfg.setdefault('cnn_maxout_pieces', 3)
        self.cfg = cfg
        if 'actions' in self.cfg:
            for action, labels in self.cfg.get('actions', {}).items():
                for label in labels:
                    self.moves.add_action(action, label)
        self.model = model
        self._multitasks = []

    def __reduce__(self):
        return (Parser, (self.vocab, self.moves, self.model), None, None)

    def __call__(self, Doc doc, beam_width=None, beam_density=None):
        """Apply the parser or entity recognizer, setting the annotations onto
        the `Doc` object.

        doc (Doc): The document to be processed.
        beam_width (int): Number of parse candidates to maintain. Defaults to
            the `beam_width` setting in `.cfg`; greedy parsing is used when
            the width is 1.
        beam_density (float): Minimum beam density, passed through to the
            `Beam`. Defaults to the `beam_density` setting in `.cfg`.
        RETURNS: The `doc` for greedy parsing, or the beam annotations when
            beam-search is used.
        """
        if beam_width is None:
            beam_width = self.cfg.get('beam_width', 1)
        if beam_density is None:
            beam_density = self.cfg.get('beam_density', 0.0)
        cdef Beam beam
        if beam_width == 1:
            states, tokvecs = self.parse_batch([doc])
            self.set_annotations([doc], states, tensors=tokvecs)
            return doc
        else:
            beams, tokvecs = self.beam_parse([doc],
                                             beam_width=beam_width,
                                             beam_density=beam_density)
            beam = beams[0]
            output = self.moves.get_beam_annot(beam)
            state = StateClass.borrow(<StateC*>beam.at(0))
            self.set_annotations([doc], [state], tensors=tokvecs)
            _cleanup(beam)
            return output

    def pipe(self, docs, int batch_size=256, int n_threads=2,
             beam_width=None, beam_density=None):
        """Process a stream of documents.

        docs: The sequence of documents to process.
        batch_size (int): Number of documents to accumulate into a working set.
        n_threads (int): The number of threads with which to work on the buffer
            in parallel.
        YIELDS (Doc): Documents, in order.
        """
        if beam_width is None:
            beam_width = self.cfg.get('beam_width', 1)
        if beam_density is None:
            beam_density = self.cfg.get('beam_density', 0.0)
        cdef Doc doc
        for batch in cytoolz.partition_all(batch_size, docs):
            batch_in_order = list(batch)
            # Sort by length so documents within a sub-batch are of similar
            # size; the original order is restored when yielding below.
            by_length = sorted(batch_in_order, key=lambda doc: len(doc))
            batch_beams = []
            for subbatch in cytoolz.partition_all(8, by_length):
                subbatch = list(subbatch)
                if beam_width == 1:
                    parse_states, tokvecs = self.parse_batch(subbatch)
                    beams = []
                else:
                    beams, tokvecs = self.beam_parse(subbatch,
                                                     beam_width=beam_width,
                                                     beam_density=beam_density)
                    parse_states = _collect_states(beams)
                self.set_annotations(subbatch, parse_states, tensors=None)
                for beam in beams:
                    _cleanup(beam)
            for doc in batch_in_order:
                yield doc

    def parse_batch(self, docs):
        cdef:
            precompute_hiddens state2vec
            Pool mem
            const float* feat_weights
            StateC* st
            StateClass stcls
            vector[StateC*] states
            int guess, nr_class, nr_feat, nr_piece, nr_dim, nr_state, nr_step
            int j
        if isinstance(docs, Doc):
            docs = [docs]

        cuda_stream = util.get_cuda_stream()
        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(
            docs, cuda_stream, 0.0)
        nr_state = len(docs)
        nr_class = self.moves.n_moves
        nr_dim = tokvecs.shape[1]
        nr_feat = self.nr_feature
        nr_piece = state2vec.nP

        state_objs = self.moves.init_batch(docs)
        for stcls in state_objs:
            if not stcls.c.is_final():
                states.push_back(stcls.c)

        feat_weights = state2vec.get_feat_weights()
        cdef int i
        cdef np.ndarray hidden_weights = numpy.ascontiguousarray(
            vec2scores._layers[-1].W.T)
        cdef np.ndarray hidden_bias = vec2scores._layers[-1].b

        hW = <float*>hidden_weights.data
        hb = <float*>hidden_bias.data
        bias = <float*>state2vec.bias.data
        cdef int nr_hidden = hidden_weights.shape[0]
        cdef int nr_task = states.size()
        with nogil:
            for i in range(nr_task):
                self._parseC(states[i],
                    feat_weights, bias, hW, hb,
                    nr_class, nr_hidden, nr_feat, nr_piece)
        PyErr_CheckSignals()
        tokvecs = self.model[0].ops.unflatten(tokvecs,
            [len(doc) for doc in docs])
        return state_objs, tokvecs

    cdef void _parseC(self, StateC* state,
            const float* feat_weights, const float* bias,
            const float* hW, const float* hb,
            int nr_class, int nr_hidden, int nr_feat, int nr_piece) nogil:
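        # Greedy decoding loop (summary of the body below): until the state
        # is final, extract the context-token ids, sum the pre-computed
        # hidden contributions for those tokens, apply the maxout
        # nonlinearity, multiply by the output weights to get class scores,
        # then apply the best-scoring valid transition.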
        token_ids = <int*>calloc(nr_feat, sizeof(int))
        is_valid = <int*>calloc(nr_class, sizeof(int))
        vectors = <float*>calloc(nr_hidden * nr_piece, sizeof(float))
        scores = <float*>calloc(nr_class, sizeof(float))
        if not (token_ids and is_valid and vectors and scores):
            with gil:
                PyErr_SetFromErrno(MemoryError)
                PyErr_CheckSignals()
        cdef float feature
        while not state.is_final():
            state.set_context_tokens(token_ids, nr_feat)
            memset(vectors, 0, nr_hidden * nr_piece * sizeof(float))
            memset(scores, 0, nr_class * sizeof(float))
            sum_state_features(vectors,
                feat_weights, token_ids, 1, nr_feat, nr_hidden * nr_piece)
            for i in range(nr_hidden * nr_piece):
                vectors[i] += bias[i]
            V = vectors
            W = hW
            for i in range(nr_hidden):
                if nr_piece == 1:
                    feature = V[0] if V[0] >= 0. else 0.
                elif nr_piece == 2:
                    feature = V[0] if V[0] >= V[1] else V[1]
                else:
                    feature = Vec.max(V, nr_piece)
                for j in range(nr_class):
                    scores[j] += feature * W[j]
                W += nr_class
                V += nr_piece
            for i in range(nr_class):
                scores[i] += hb[i]
            self.moves.set_valid(is_valid, state)
            guess = arg_max_if_valid(scores, is_valid, nr_class)
            action = self.moves.c[guess]
            action.do(state, action.label)
            state.push_hist(guess)
        free(token_ids)
        free(is_valid)
        free(vectors)
        free(scores)

    def beam_parse(self, docs, int beam_width=3, float beam_density=0.001):
        cdef Beam beam
        cdef np.ndarray scores
        cdef Doc doc
        cdef int nr_class = self.moves.n_moves
        cuda_stream = util.get_cuda_stream()
        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(
            docs, cuda_stream, 0.0)
        cdef int offset = 0
        cdef int j = 0
        cdef int k

        beams = []
        for doc in docs:
            beam = Beam(nr_class, beam_width, min_density=beam_density)
            beam.initialize(self.moves.init_beam_state, doc.length, doc.c)
            for i in range(beam.width):
                state = <StateC*>beam.at(i)
                state.offset = offset
            offset += len(doc)
            beam.check_done(_check_final_state, NULL)
            beams.append(beam)
        cdef np.ndarray token_ids
        token_ids = numpy.zeros((len(docs) * beam_width, self.nr_feature),
                                dtype='i', order='C')
        todo = [beam for beam in beams if not beam.is_done]

        cdef int* c_ids
        cdef int nr_feature = self.nr_feature
        cdef int n_states
        while todo:
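            # Each iteration scores all live states across all beams with a
            # single batched network call, then advances each beam by one
            # transition.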
            todo = [beam for beam in beams if not beam.is_done]
            token_ids.fill(-1)
            c_ids = <int*>token_ids.data
            n_states = 0
            for beam in todo:
                for i in range(beam.size):
                    state = <StateC*>beam.at(i)
                    # This way we avoid having to score finalized states
                    # We do have to take care to keep indexes aligned, though
                    if not state.is_final():
                        state.set_context_tokens(c_ids, nr_feature)
                        c_ids += nr_feature
                        n_states += 1
            if n_states == 0:
                break
            vectors = state2vec(token_ids[:n_states])
            scores = vec2scores(vectors)
            c_scores = <float*>scores.data
            for beam in todo:
                for i in range(beam.size):
                    state = <StateC*>beam.at(i)
                    if not state.is_final():
                        self.moves.set_valid(beam.is_valid[i], state)
                        memcpy(beam.scores[i], c_scores,
                               nr_class * sizeof(float))
                        c_scores += nr_class
                beam.advance(_transition_state, NULL, <void*>self.moves.c)
                beam.check_done(_check_final_state, NULL)
        tokvecs = self.model[0].ops.unflatten(tokvecs,
            [len(doc) for doc in docs])
        return beams, tokvecs

    def update(self, docs, golds, drop=0., sgd=None, losses=None):
        if not any(self.moves.has_gold(gold) for gold in golds):
            return None
        assert len(docs) == len(golds)
        # NB: numpy.random.random() >= 0.0 is always true, so beam training
        # is used whenever the configured beam width is 2 or more.
        if self.cfg.get('beam_width', 1) >= 2 and numpy.random.random() >= 0.0:
            return self.update_beam(docs, golds,
                self.cfg['beam_width'], self.cfg['beam_density'],
                drop=drop, sgd=sgd, losses=losses)
        if losses is not None and self.name not in losses:
            losses[self.name] = 0.
        if isinstance(docs, Doc) and isinstance(golds, GoldParse):
            docs = [docs]
            golds = [golds]
        for multitask in self._multitasks:
            multitask.update(docs, golds, drop=drop, sgd=sgd)
        cuda_stream = util.get_cuda_stream()
        states, golds, max_steps = self._init_gold_batch(docs, golds)
        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(
            docs, cuda_stream, drop)
        todo = [(s, g) for (s, g) in zip(states, golds)
                if not s.is_final() and g is not None]
        if not todo:
            return None

        backprops = []
        # Add a padding vector to the d_tokvecs gradient, so that missing
        # values don't affect the real gradient.
        d_tokvecs = state2vec.ops.allocate((tokvecs.shape[0]+1, tokvecs.shape[1]))
        cdef float loss = 0.
        n_steps = 0
        while todo:
            states, golds = zip(*todo)
            token_ids = self.get_token_ids(states)
            vector, bp_vector = state2vec.begin_update(token_ids, drop=0.0)
            if drop != 0:
                mask = vec2scores.ops.get_dropout_mask(vector.shape, drop)
                vector *= mask
            hists = numpy.asarray([st.history for st in states], dtype='i')
            if self.cfg.get('hist_size', 0):
                scores, bp_scores = vec2scores.begin_update((vector, hists),
                                                            drop=drop)
            else:
                scores, bp_scores = vec2scores.begin_update(vector, drop=drop)

            d_scores = self.get_batch_loss(states, golds, scores)
            d_scores /= len(docs)
            d_vector = bp_scores(d_scores, sgd=sgd)
            if drop != 0:
                d_vector *= mask

            if isinstance(self.model[0].ops, CupyOps) \
            and not isinstance(token_ids, state2vec.ops.xp.ndarray):
                # Move token_ids and d_vector to GPU, asynchronously
                backprops.append((
                    util.get_async(cuda_stream, token_ids),
                    util.get_async(cuda_stream, d_vector),
                    bp_vector
                ))
            else:
                backprops.append((token_ids, d_vector, bp_vector))
            self.transition_batch(states, scores)
            todo = [(st, gold) for (st, gold) in todo
                    if not st.is_final()]
            if losses is not None:
                losses[self.name] += (d_scores**2).sum()
            n_steps += 1
            if n_steps >= max_steps:
                break
        self._make_updates(d_tokvecs,
            bp_tokvecs, backprops, sgd, cuda_stream)
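
    # Typical usage (a sketch -- the docs/golds batch comes from the
    # caller's training loop, not from this module):
    #
    #     optimizer = parser.create_optimizer()
    #     losses = {}
    #     parser.update(docs, golds, drop=0.2, sgd=optimizer, losses=losses)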

    def update_beam(self, docs, golds, width=None, density=None,
                    drop=0., sgd=None, losses=None):
        if not any(self.moves.has_gold(gold) for gold in golds):
            return None
        if not golds:
            return None
        if width is None:
            width = self.cfg.get('beam_width', 2)
        if density is None:
            density = self.cfg.get('beam_density', 0.0)
        if losses is not None and self.name not in losses:
            losses[self.name] = 0.
        lengths = [len(d) for d in docs]
        assert min(lengths) >= 1
        states = self.moves.init_batch(docs)
        for gold in golds:
            self.moves.preprocess_gold(gold)
        cuda_stream = util.get_cuda_stream()
        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(
            docs, cuda_stream, drop)
        states_d_scores, backprops, beams = _beam_utils.update_beam(
            self.moves, self.nr_feature, 500, states, golds, state2vec,
            vec2scores, width, density, self.cfg.get('hist_size', 0),
            drop=drop, losses=losses)
        backprop_lower = []
        cdef float batch_size = len(docs)
        for i, d_scores in enumerate(states_d_scores):
            d_scores /= batch_size
            if losses is not None:
                losses[self.name] += (d_scores**2).sum()
            ids, bp_vectors, bp_scores = backprops[i]
            d_vector = bp_scores(d_scores, sgd=sgd)
            if isinstance(self.model[0].ops, CupyOps) \
            and not isinstance(ids, state2vec.ops.xp.ndarray):
                backprop_lower.append((
                    util.get_async(cuda_stream, ids),
                    util.get_async(cuda_stream, d_vector),
                    bp_vectors))
            else:
                backprop_lower.append((ids, d_vector, bp_vectors))
        # Add a padding vector to the d_tokvecs gradient, so that missing
        # values don't affect the real gradient.
        d_tokvecs = state2vec.ops.allocate((tokvecs.shape[0]+1, tokvecs.shape[1]))
        self._make_updates(d_tokvecs, bp_tokvecs, backprop_lower, sgd,
                           cuda_stream)
        cdef Beam beam
        for beam in beams:
            _cleanup(beam)
2017-09-14 14:18:02 +00:00
|
|
|
|
2018-02-21 15:00:38 +00:00
|
|
|
def _init_gold_batch(self, whole_docs, whole_golds, min_length=5, max_length=2000):
|
2017-05-25 16:18:59 +00:00
|
|
|
"""Make a square batch, of length equal to the shortest doc. A long
|
|
|
|
doc will get multiple states. Let's say we have a doc of length 2*N,
|
|
|
|
where N is the shortest doc. We'll make two states, one representing
|
|
|
|
long_doc[:N], and another representing long_doc[N:]."""
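        # For example (illustrative numbers): with docs of lengths 10 and
        # 30, max_length becomes 10, so the longer doc contributes states
        # starting at token offsets 0, 10 and 20, each advanced to its
        # start point by replaying the oracle actions.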
        cdef:
            StateClass state
            Transition action
        whole_states = self.moves.init_batch(whole_docs)
        max_length = max(min_length,
                         min(max_length, min([len(doc) for doc in whole_docs])))
        max_moves = 0
        states = []
        golds = []
        for doc, state, gold in zip(whole_docs, whole_states, whole_golds):
            gold = self.moves.preprocess_gold(gold)
            if gold is None:
                continue
            oracle_actions = self.moves.get_oracle_sequence(doc, gold)
            start = 0
            while start < len(doc):
                state = state.copy()
                n_moves = 0
                while state.B(0) < start and not state.is_final():
                    action = self.moves.c[oracle_actions.pop(0)]
                    action.do(state.c, action.label)
                    state.c.push_hist(action.clas)
                    n_moves += 1
                has_gold = self.moves.has_gold(gold, start=start,
                                               end=start+max_length)
                if not state.is_final() and has_gold:
                    states.append(state)
                    golds.append(gold)
                    max_moves = max(max_moves, n_moves)
                start += min(max_length, len(doc)-start)
            max_moves = max(max_moves, len(oracle_actions))
        return states, golds, max_moves

    def _make_updates(self, d_tokvecs, bp_tokvecs, backprops, sgd,
                      cuda_stream=None):
        # Tells CUDA to block, so our async copies complete.
        if cuda_stream is not None:
            cuda_stream.synchronize()
        xp = get_array_module(d_tokvecs)
        for ids, d_vector, bp_vector in backprops:
            d_state_features = bp_vector((d_vector, ids), sgd=sgd)
            ids = ids.flatten()
            d_state_features = d_state_features.reshape(
                (ids.size, d_state_features.shape[2]))
            self.model[0].ops.scatter_add(d_tokvecs, ids,
                                          d_state_features)
        # Padded -- see update()
        bp_tokvecs(d_tokvecs[:-1], sgd=sgd)

    @property
    def move_names(self):
        names = []
        for i in range(self.moves.n_moves):
            name = self.moves.move_name(self.moves.c[i].move,
                                        self.moves.c[i].label)
            names.append(name)
        return names

    def get_batch_model(self, docs, stream, dropout):
        tok2vec, lower, upper = self.model
        tokvecs, bp_tokvecs = tok2vec.begin_update(docs, drop=dropout)
        state2vec = precompute_hiddens(len(docs), tokvecs,
                                       lower, stream, drop=0.0)
        return (tokvecs, bp_tokvecs), state2vec, upper

    # Number of context tokens extracted per state by
    # StateC.set_context_tokens (tokens of the stack, buffer and related
    # positions).
    nr_feature = 13

    def get_token_ids(self, states):
        cdef StateClass state
        cdef int n_tokens = self.nr_feature
        cdef np.ndarray ids = numpy.zeros((len(states), n_tokens),
                                          dtype='i', order='C')
        c_ids = <int*>ids.data
        for i, state in enumerate(states):
            if not state.is_final():
                state.c.set_context_tokens(c_ids, n_tokens)
            c_ids += ids.shape[1]
        return ids

    def transition_batch(self, states, float[:, ::1] scores):
        cdef StateClass state
        cdef int[500] is_valid  # TODO: Unhack
        cdef float* c_scores = &scores[0, 0]
        for state in states:
            self.moves.set_valid(is_valid, state.c)
            guess = arg_max_if_valid(c_scores, is_valid, scores.shape[1])
            action = self.moves.c[guess]
            action.do(state.c, action.label)
            c_scores += scores.shape[1]
            state.c.push_hist(guess)

    def get_batch_loss(self, states, golds, float[:, ::1] scores):
        cdef StateClass state
        cdef GoldParse gold
        cdef Pool mem = Pool()
        cdef int i
        is_valid = <int*>mem.alloc(self.moves.n_moves, sizeof(int))
        costs = <float*>mem.alloc(self.moves.n_moves, sizeof(float))
        cdef np.ndarray d_scores = numpy.zeros(
            (len(states), self.moves.n_moves), dtype='f', order='C')
        c_d_scores = <float*>d_scores.data
        for i, (state, gold) in enumerate(zip(states, golds)):
            memset(is_valid, 0, self.moves.n_moves * sizeof(int))
            memset(costs, 0, self.moves.n_moves * sizeof(float))
            self.moves.set_costs(is_valid, costs, state, gold)
            cpu_log_loss(c_d_scores,
                costs, is_valid, &scores[i, 0], d_scores.shape[1])
            c_d_scores += d_scores.shape[1]
        return d_scores

    def set_annotations(self, docs, states, tensors=None):
        cdef StateClass state
        cdef Doc doc
        for i, (state, doc) in enumerate(zip(states, docs)):
            self.moves.finalize_state(state.c)
            for j in range(doc.length):
                doc.c[j] = state.c._sent[j]
            if tensors is not None:
                if isinstance(doc.tensor, numpy.ndarray) \
                and not isinstance(tensors[i], numpy.ndarray):
                    doc.extend_tensor(tensors[i].get())
                else:
                    doc.extend_tensor(tensors[i])
            self.moves.finalize_doc(doc)

        for hook in self.postprocesses:
            for doc in docs:
                hook(doc)

    @property
    def labels(self):
        class_names = [self.moves.get_class_name(i)
                       for i in range(self.moves.n_moves)]
        return class_names

    @property
    def tok2vec(self):
        '''Return the embedding and convolutional layer of the model.'''
        if self.model in (None, True, False):
            return None
        else:
            return self.model[0]

    @property
    def postprocesses(self):
        # Available for subclasses, e.g. to deprojectivize
        return []

    def add_label(self, label):
        resized = False
        for action in self.moves.action_types:
            added = self.moves.add_action(action, label)
            if added:
                # Important that the labels be stored as a list! We need the
                # order, or the model goes out of sync.
                self.cfg.setdefault('extra_labels', []).append(label)
                resized = True
        if self.model not in (True, False, None) and resized:
            # Weights are stored in (nr_out, nr_in) format, so we're basically
            # just adding rows here.
            smaller = self.model[-1]._layers[-1]
            larger = Affine(self.moves.n_moves, smaller.nI)
            copy_array(larger.W[:smaller.nO], smaller.W)
            copy_array(larger.b[:smaller.nO], smaller.b)
            self.model[-1]._layers[-1] = larger

    def begin_training(self, gold_tuples, pipeline=None, sgd=None, **cfg):
        if 'model' in cfg:
            self.model = cfg['model']
        gold_tuples = nonproj.preprocess_training_data(gold_tuples,
                                                       label_freq_cutoff=30)
        actions = self.moves.get_actions(gold_parses=gold_tuples)
        for action, labels in actions.items():
            for label in labels:
                self.moves.add_action(action, label)
        cfg.setdefault('token_vector_width', 128)
        if self.model is True:
            cfg['pretrained_dims'] = self.vocab.vectors_length
            self.model, cfg = self.Model(self.moves.n_moves, **cfg)
            if sgd is None:
                sgd = self.create_optimizer()
            self.model[1].begin_training(
                self.model[1].ops.allocate((5, cfg['token_vector_width'])))
            if pipeline is not None:
                self.init_multitask_objectives(gold_tuples, pipeline,
                                               sgd=sgd, **cfg)
            link_vectors_to_models(self.vocab)
        else:
            if sgd is None:
                sgd = self.create_optimizer()
            self.model[1].begin_training(
                self.model[1].ops.allocate((5, cfg['token_vector_width'])))
        self.cfg.update(cfg)
        return sgd

    def add_multitask_objective(self, target):
        # Defined in subclasses, to avoid circular import
        raise NotImplementedError

    def init_multitask_objectives(self, gold_tuples, pipeline, **cfg):
        '''Set up models for secondary objectives, to benefit from multi-task
        learning. This method is intended to be overridden by subclasses.

        For instance, the dependency parser can benefit from sharing
        an input representation with a label prediction model. These auxiliary
        models are discarded after training.
        '''
        pass

    def preprocess_gold(self, docs_golds):
        for doc, gold in docs_golds:
            yield doc, gold

    def use_params(self, params):
        # Can't decorate cdef class :(. Workaround.
        with self.model[0].use_params(params):
            with self.model[1].use_params(params):
                yield

    def to_disk(self, path, **exclude):
        serializers = {
            'tok2vec_model': lambda p: p.open('wb').write(
                self.model[0].to_bytes()),
            'lower_model': lambda p: p.open('wb').write(
                self.model[1].to_bytes()),
            'upper_model': lambda p: p.open('wb').write(
                self.model[2].to_bytes()),
            'vocab': lambda p: self.vocab.to_disk(p),
            'moves': lambda p: self.moves.to_disk(p, strings=False),
            'cfg': lambda p: p.open('w').write(json_dumps(self.cfg))
        }
        util.to_disk(path, serializers, exclude)

    def from_disk(self, path, **exclude):
        deserializers = {
            'vocab': lambda p: self.vocab.from_disk(p),
            'moves': lambda p: self.moves.from_disk(p, strings=False),
            'cfg': lambda p: self.cfg.update(util.read_json(p)),
            'model': lambda p: None
        }
        util.from_disk(path, deserializers, exclude)
        if 'model' not in exclude:
            path = util.ensure_path(path)
            if self.model is True:
                self.cfg.setdefault('pretrained_dims',
                                    self.vocab.vectors_length)
                self.model, cfg = self.Model(**self.cfg)
            else:
                cfg = {}
            with (path / 'tok2vec_model').open('rb') as file_:
                bytes_data = file_.read()
            self.model[0].from_bytes(bytes_data)
            with (path / 'lower_model').open('rb') as file_:
                bytes_data = file_.read()
            self.model[1].from_bytes(bytes_data)
            with (path / 'upper_model').open('rb') as file_:
                bytes_data = file_.read()
            self.model[2].from_bytes(bytes_data)
            self.cfg.update(cfg)
        return self

    def to_bytes(self, **exclude):
        serializers = OrderedDict((
            ('tok2vec_model', lambda: self.model[0].to_bytes()),
            ('lower_model', lambda: self.model[1].to_bytes()),
            ('upper_model', lambda: self.model[2].to_bytes()),
            ('vocab', lambda: self.vocab.to_bytes()),
            ('moves', lambda: self.moves.to_bytes(strings=False)),
            ('cfg', lambda: json.dumps(self.cfg, indent=2, sort_keys=True))
        ))
        if 'model' in exclude:
            exclude['tok2vec_model'] = True
            exclude['lower_model'] = True
            exclude['upper_model'] = True
            exclude.pop('model')
        return util.to_bytes(serializers, exclude)

    def from_bytes(self, bytes_data, **exclude):
        deserializers = OrderedDict((
            ('vocab', lambda b: self.vocab.from_bytes(b)),
            ('moves', lambda b: self.moves.from_bytes(b, strings=False)),
            ('cfg', lambda b: self.cfg.update(json.loads(b))),
            ('tok2vec_model', lambda b: None),
            ('lower_model', lambda b: None),
            ('upper_model', lambda b: None)
        ))
        msg = util.from_bytes(bytes_data, deserializers, exclude)
        if 'model' not in exclude:
            if self.model is True:
                self.model, cfg = self.Model(**self.cfg)
                cfg['pretrained_dims'] = self.vocab.vectors_length
            else:
                cfg = {}
                cfg['pretrained_dims'] = self.vocab.vectors_length
            if 'tok2vec_model' in msg:
                self.model[0].from_bytes(msg['tok2vec_model'])
            if 'lower_model' in msg:
                self.model[1].from_bytes(msg['lower_model'])
            if 'upper_model' in msg:
                self.model[2].from_bytes(msg['upper_model'])
            self.cfg.update(cfg)
        return self


class ParserStateError(ValueError):
    def __init__(self, doc):
        ValueError.__init__(self,
            "Error analysing doc -- no valid actions available. This should "
            "never happen, so please report the error on the issue tracker. "
            "Here's the thread to do so --- reopen it if it's closed:\n"
            "https://github.com/spacy-io/spaCy/issues/429\n"
            "Please include the text that the parser failed on, which is:\n"
            "%s" % repr(doc.text))


cdef int arg_max_if_gold(const weight_t* scores, const weight_t* costs,
                         const int* is_valid, int n) nogil:
    # Find minimum cost
    cdef float cost = 1
    for i in range(n):
        if is_valid[i] and costs[i] < cost:
            cost = costs[i]
    # Now find best-scoring with that cost
    cdef int best = -1
    for i in range(n):
        if costs[i] <= cost and is_valid[i]:
            if best == -1 or scores[i] > scores[best]:
                best = i
    return best


cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid,
                          int n) nogil:
    cdef int best = -1
    for i in range(n):
        if is_valid[i] >= 1:
            if best == -1 or scores[i] > scores[best]:
                best = i
    return best


# These are passed as callbacks to thinc.search.Beam
cdef int _transition_state(void* _dest, void* _src, class_t clas,
                           void* _moves) except -1:
    dest = <StateC*>_dest
    src = <StateC*>_src
    moves = <const Transition*>_moves
    dest.clone(src)
    moves[clas].do(dest, moves[clas].label)
    dest.push_hist(clas)


cdef int _check_final_state(void* _state, void* extra_args) except -1:
    state = <StateC*>_state
    return state.is_final()


def _cleanup(Beam beam):
    cdef StateC* state
    # Once parsing has finished, states in beam may not be unique. Is this
    # correct?
    seen = set()
    for i in range(beam.width):
        addr = <size_t>beam._parents[i].content
        if addr not in seen:
            state = <StateC*>addr
            del state
            seen.add(addr)
        else:
            print(i, addr)
            print(seen)
            raise Exception
        addr = <size_t>beam._states[i].content
        if addr not in seen:
            state = <StateC*>addr
            del state
            seen.add(addr)
        else:
            print(i, addr)
            print(seen)
            raise Exception