from thinc.api import layerize, chain, clone, concatenate, with_flatten
from thinc.neural import Model, Maxout, Softmax, Affine
from thinc.neural._classes.hash_embed import HashEmbed
from thinc.neural._classes.convolution import ExtractWindow
from thinc.neural._classes.static_vectors import StaticVectors

from .attrs import ID, LOWER, PREFIX, SUFFIX, SHAPE, TAG, DEP


def get_col(idx):
    '''Return a layer that extracts column `idx` from its 2D input array.'''
    def forward(X, drop=0.):
        output = Model.ops.xp.ascontiguousarray(X[:, idx])
        return output, None
    return layerize(forward)
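

# Illustrative sketch (not part of the original module): get_col(i) slices
# column i out of a 2D array of attribute ids, which is how the hash
# embeddings further down pick out the attribute they embed. For example:
#
#     ids = Model.ops.xp.asarray([[3, 7], [4, 9]], dtype='uint64')
#     col0, _ = get_col(0).begin_update(ids)   # -> array([3, 4], dtype=uint64)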


def build_model(state2vec, width, depth, nr_class):
    with Model.define_operators({'>>': chain, '**': clone}):
        model = (
            state2vec
            >> Maxout(width, 1344)
            >> Maxout(width, width)
            >> Affine(nr_class, width)
        )
    return model


def build_debug_model(state2vec, width, depth, nr_class):
    with Model.define_operators({'>>': chain, '**': clone}):
        model = (
            state2vec
            >> Maxout(width)
            >> Affine(nr_class)
        )
    return model


def build_debug_state2vec(width, nr_vector=1000, nF=1, nB=0, nS=1, nL=2, nR=2):
    ops = Model.ops

    def forward(tokens_attrs_vectors, drop=0.):
        tokens, attr_vals, tokvecs = tokens_attrs_vectors

        orig_tokvecs_shape = tokvecs.shape
        tokvecs = tokvecs.reshape((tokvecs.shape[0],
                                   tokvecs.shape[1] * tokvecs.shape[2]))
        vector = tokvecs

        def backward(d_vector, sgd=None):
            # Undo the forward reshape on the gradient.
            d_tokvecs = d_vector.reshape(orig_tokvecs_shape)
            return (tokens, d_tokvecs)
        return vector, backward

    model = layerize(forward)
    return model


def build_parser_state2vec(width, nr_vector=1000, nF=1, nB=0, nS=1, nL=2, nR=2):
    embed_tags = _reshape(chain(get_col(0), HashEmbed(16, nr_vector)))
    embed_deps = _reshape(chain(get_col(1), HashEmbed(16, nr_vector)))
    ops = embed_tags.ops

    def forward(tokens_attrs_vectors, drop=0.):
        tokens, attr_vals, tokvecs = tokens_attrs_vectors
        tagvecs, bp_tagvecs = embed_tags.begin_update(attr_vals, drop=drop)
        depvecs, bp_depvecs = embed_deps.begin_update(attr_vals, drop=drop)
        orig_tokvecs_shape = tokvecs.shape
        tokvecs = tokvecs.reshape((tokvecs.shape[0],
                                   tokvecs.shape[1] * tokvecs.shape[2]))

        shapes = (tagvecs.shape, depvecs.shape, tokvecs.shape)
        assert tagvecs.shape[0] == depvecs.shape[0] == tokvecs.shape[0], shapes
        vector = ops.xp.hstack((tagvecs, depvecs, tokvecs))

        def backward(d_vector, sgd=None):
            d_tagvecs, d_depvecs, d_tokvecs = backprop_concatenate(d_vector, shapes)
            assert d_tagvecs.shape == shapes[0], (d_tagvecs.shape, shapes)
            assert d_depvecs.shape == shapes[1], (d_depvecs.shape, shapes)
            assert d_tokvecs.shape == shapes[2], (d_tokvecs.shape, shapes)
            bp_tagvecs(d_tagvecs)
            bp_depvecs(d_depvecs)
            d_tokvecs = d_tokvecs.reshape(orig_tokvecs_shape)
            return (tokens, d_tokvecs)
        return vector, backward

    model = layerize(forward)
    model._layers = [embed_tags, embed_deps]
    return model


def backprop_concatenate(gradient, shapes):
    grads = []
    start = 0
    for shape in shapes:
        end = start + shape[1]
        grads.append(gradient[:, start : end])
        start = end
    return grads
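

# A minimal worked example (illustrative only, sizes made up) of how
# backprop_concatenate undoes ops.xp.hstack: each input's gradient is the
# corresponding column slice of the concatenated gradient.
#
#     import numpy
#     shapes = [(4, 16), (4, 16), (4, 64)]
#     grad = numpy.zeros((4, 16 + 16 + 64))
#     d_tags, d_deps, d_toks = backprop_concatenate(grad, shapes)
#     assert d_tags.shape == (4, 16) and d_toks.shape == (4, 64)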


def _reshape(layer):
    '''Transforms input with shape

        (states, tokens, features)

    into input with shape:

        (states * tokens, features)

    So that it can be used with a token-wise feature extraction layer, e.g.
    an embedding layer. The embedding layer outputs:

        (states * tokens, ndim)

    But we want to concatenate the vectors for the tokens, so we produce:

        (states, tokens * ndim)

    We then need to reverse the transforms to do the backward pass. Recall
    the simple rule here: each layer is a map:

        inputs -> (outputs, (d_outputs->d_inputs))

    So the shapes must match like this:

        shape of forward input == shape of backward output
        shape of backward input == shape of forward output
    '''
    def forward(X__bfm, drop=0.):
        b, f, m = X__bfm.shape
        B = b * f
        M = f * m
        X__Bm = X__bfm.reshape((B, m))
        y__Bn, bp_yBn = layer.begin_update(X__Bm, drop=drop)
        n = y__Bn.shape[1]
        N = f * n
        y__bN = y__Bn.reshape((b, N))

        def backward(dy__bN, sgd=None):
            dy__Bn = dy__bN.reshape((B, n))
            dX__Bm = bp_yBn(dy__Bn, sgd)
            if dX__Bm is None:
                return None
            else:
                return dX__Bm.reshape((b, f, m))
        return y__bN, backward

    model = layerize(forward)
    model._layers.append(layer)
    return model
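

# A hedged sketch (illustrative sizes, not from the original file) of what
# _reshape buys us: a HashEmbed works token-wise on a 2D array of ids, but the
# parser hands it a 3D (states, tokens, features) batch and wants one row per
# state back, with the token embeddings concatenated.
#
#     embed = _reshape(chain(get_col(0), HashEmbed(16, 1000)))
#     attr_vals = Model.ops.xp.zeros((8, 13, 2), dtype='uint64')
#     vecs, bp_vecs = embed.begin_update(attr_vals)
#     # vecs.shape == (8, 13 * 16); bp_vecs reverses the reshape on the way back.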


@layerize
def flatten(seqs, drop=0.):
    '''Concatenate a list of sequences into a single contiguous array. The
    backward callback passes the gradient through unchanged, without splitting
    it back into per-sequence pieces.'''
    ops = Model.ops

    def finish_update(d_X, sgd=None):
        return d_X
    X = ops.xp.concatenate([ops.asarray(seq) for seq in seqs])
    return X, finish_update


def build_tok2vec(lang, width, depth=2, embed_size=1000):
    cols = [ID, LOWER, PREFIX, SUFFIX, SHAPE, TAG]
    with Model.define_operators({'>>': chain, '|': concatenate, '**': clone}):
        #static = get_col(cols.index(ID)) >> StaticVectors(lang, width)
        lower = get_col(cols.index(LOWER)) >> HashEmbed(width, embed_size)
        prefix = get_col(cols.index(PREFIX)) >> HashEmbed(width, embed_size)
        suffix = get_col(cols.index(SUFFIX)) >> HashEmbed(width, embed_size)
        shape = get_col(cols.index(SHAPE)) >> HashEmbed(width, embed_size)
        tag = get_col(cols.index(TAG)) >> HashEmbed(width, embed_size)
        tok2vec = (
            doc2feats(cols)
            >> with_flatten(
                #(static | prefix | suffix | shape)
                (lower | prefix | suffix | shape | tag)
                >> Maxout(width, width*5)
                >> (ExtractWindow(nW=1) >> Maxout(width, width*3))
                >> (ExtractWindow(nW=1) >> Maxout(width, width*3))
            )
        )
    return tok2vec
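

# A hedged usage sketch (not part of the original file): assuming `docs` is a
# list of spaCy Doc objects from a loaded pipeline, the tok2vec model composes
# per-token hash embeddings with two ExtractWindow >> Maxout re-encoding steps
# and can be driven like any other thinc layer:
#
#     tok2vec = build_tok2vec('en', width=128)
#     tokvecs, bp_tokvecs = tok2vec.begin_update(docs)
#     # with_flatten returns one (n_tokens, width) array per doc, and
#     # bp_tokvecs expects gradients with matching per-doc shapes.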


def doc2feats(cols):
    def forward(docs, drop=0.):
        feats = [doc.to_array(cols) for doc in docs]
        # `model` is the layer created below; forward only runs after it exists.
        feats = [model.ops.asarray(f, dtype='uint64') for f in feats]
        return feats, None
    model = layerize(forward)
    return model