mirror of https://github.com/explosion/spaCy.git
Switch parser to gemm from thinc.openblas
This commit is contained in:
parent 9aeec9c242
commit d55620041b
@@ -1,7 +1,6 @@
 # cython: infer_types=True
 # cython: cdivision=True
 # cython: boundscheck=False
-# cython: profile=True
 # coding: utf-8
 from __future__ import unicode_literals, print_function
 
@@ -29,6 +28,8 @@ from thinc.neural.ops import CupyOps
 from thinc.neural.util import get_array_module
 from thinc.linalg cimport Vec, VecVec
 
+from thinc.openblas cimport simple_gemm, simple_axpy
+
 from .._ml import zero_init, PrecomputableAffine, Tok2Vec, flatten
 from .._ml import link_vectors_to_models, create_default_optimizer
 from ..compat import json_dumps, copy_array
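
For readers unfamiliar with the thinc.openblas primitives pulled in here: judging by the call sites later in this diff, simple_axpy(y, n, x, alpha) adds alpha * x into y over n floats, and simple_gemm(C, ..., A, ..., B, ..., 0, 0) accumulates the matrix product A·B into C without transposition. A minimal NumPy sketch of those two operations (the sketch_* names are illustrative, not part of thinc):

    import numpy as np

    def sketch_axpy(y, x, alpha=1.0):
        # In-place y += alpha * x, mirroring how simple_axpy is used in this diff.
        y += alpha * x
        return y

    def sketch_gemm(C, A, B):
        # In-place C += A @ B with no transposition, mirroring the
        # simple_gemm(..., 0, 0) calls added in _parseC below.
        C += A @ B
        return C

    # Example: score a batch of 4 hidden vectors against a 64x10 weight matrix.
    vectors = np.random.rand(4, 64).astype("float32")
    hW = np.random.rand(64, 10).astype("float32")
    hb = np.random.rand(10).astype("float32")
    scores = np.zeros((4, 10), dtype="float32")
    sketch_gemm(scores, vectors, hW)   # hidden-to-output product
    for row in scores:                 # per-row bias, like the axpy loop below
        sketch_axpy(row, hb)
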
@@ -171,8 +172,9 @@ cdef void sum_state_features(float* output,
             else:
                 idx = token_ids[f] * F * O + f*O
                 feature = &cached[idx]
-            for i in range(O):
-                output[i] += feature[i]
+            simple_axpy(output, O, feature, 1.)
+            #for i in range(O):
+            #    output[i] += feature[i]
         output += O
         token_ids += F
 
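
The change to sum_state_features replaces the explicit element-wise accumulation loop with a single axpy per feature, adding the cached feature row into the output vector in one call. Roughly, in NumPy terms (the flat layout of cached and the skip for negative token ids are assumptions for the sketch, not taken from this hunk):

    import numpy as np

    def sum_state_features_sketch(cached, token_ids, F, O):
        # cached: flat array of precomputed rows, laid out so that the row for
        # feature slot f of token t starts at index t * F * O + f * O.
        output = np.zeros(O, dtype="float32")
        for f in range(F):
            if token_ids[f] < 0:      # assumed: missing token, nothing added
                continue
            idx = token_ids[f] * F * O + f * O
            feature = cached[idx:idx + O]
            output += feature         # the simple_axpy(output, O, feature, 1.) step
        return output
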
@@ -422,59 +424,69 @@ cdef class Parser:
         cdef int nr_hidden = hidden_weights.shape[0]
         cdef int nr_task = states.size()
         with nogil:
-            for i in range(nr_task):
-                self._parseC(states[i],
-                    feat_weights, bias, hW, hb,
-                    nr_class, nr_hidden, nr_feat, nr_piece)
+            self._parseC(&states[0], nr_task, feat_weights, bias, hW, hb,
+                nr_class, nr_hidden, nr_feat, nr_piece)
         PyErr_CheckSignals()
         tokvecs = self.model[0].ops.unflatten(tokvecs,
             [len(doc) for doc in docs])
         return state_objs, tokvecs
 
-    cdef void _parseC(self, StateC* state,
+    cdef void _parseC(self, StateC** states, int nr_task,
             const float* feat_weights, const float* bias,
             const float* hW, const float* hb,
             int nr_class, int nr_hidden, int nr_feat, int nr_piece) nogil:
         token_ids = <int*>calloc(nr_feat, sizeof(int))
         is_valid = <int*>calloc(nr_class, sizeof(int))
-        vectors = <float*>calloc(nr_hidden * nr_piece, sizeof(float))
-        scores = <float*>calloc(nr_class, sizeof(float))
+        vectors = <float*>calloc(nr_hidden * nr_task, sizeof(float))
+        unmaxed = <float*>calloc(nr_hidden * nr_piece, sizeof(float))
+        scores = <float*>calloc(nr_class*nr_task, sizeof(float))
         if not (token_ids and is_valid and vectors and scores):
             with gil:
                 PyErr_SetFromErrno(MemoryError)
                 PyErr_CheckSignals()
-        cdef float feature
-        while not state.is_final():
-            state.set_context_tokens(token_ids, nr_feat)
-            memset(vectors, 0, nr_hidden * nr_piece * sizeof(float))
-            memset(scores, 0, nr_class * sizeof(float))
-            sum_state_features(vectors,
-                feat_weights, token_ids, 1, nr_feat, nr_hidden * nr_piece)
-            for i in range(nr_hidden * nr_piece):
-                vectors[i] += bias[i]
-            V = vectors
-            W = hW
-            for i in range(nr_hidden):
-                if nr_piece == 1:
-                    feature = V[0] if V[0] >= 0. else 0.
-                elif nr_piece == 2:
-                    feature = V[0] if V[0] >= V[1] else V[1]
-                else:
-                    feature = Vec.max(V, nr_piece)
-                for j in range(nr_class):
-                    scores[j] += feature * W[j]
-                W += nr_class
-                V += nr_piece
-            for i in range(nr_class):
-                scores[i] += hb[i]
-            self.moves.set_valid(is_valid, state)
-            guess = arg_max_if_valid(scores, is_valid, nr_class)
-            action = self.moves.c[guess]
-            action.do(state, action.label)
-            state.push_hist(guess)
+        cdef int nr_todo = nr_task
+        cdef int i, j
+        cdef vector[StateC*] unfinished
+        while nr_todo >= 1:
+            memset(vectors, 0, nr_todo * nr_hidden * sizeof(float))
+            memset(scores, 0, nr_todo * nr_class * sizeof(float))
+            for i in range(nr_todo):
+                state = states[i]
+                state.set_context_tokens(token_ids, nr_feat)
+                memset(unmaxed, 0, nr_hidden * nr_piece * sizeof(float))
+                sum_state_features(unmaxed,
+                    feat_weights, token_ids, 1, nr_feat, nr_hidden * nr_piece)
+                simple_axpy(unmaxed, nr_hidden*nr_piece, bias, 1.0)
+                state_vector = &vectors[i*nr_hidden]
+                for j in range(nr_hidden):
+                    index = j * nr_piece
+                    which = Vec.arg_max(&unmaxed[index], nr_piece)
+                    state_vector[j] = unmaxed[index + which]
+            # Compute hidden-to-output
+            simple_gemm(scores, nr_todo, nr_class,
+                vectors, nr_todo, nr_hidden,
+                hW, nr_hidden, nr_class, 0, 0)
+            # Add bias
+            for i in range(nr_todo):
+                simple_axpy(&scores[i*nr_class], nr_class, hb, 1.0)
+            # Validate actions, argmax, take action.
+            for i in range(nr_todo):
+                state = states[i]
+                self.moves.set_valid(is_valid, state)
+                guess = arg_max_if_valid(&scores[i*nr_class], is_valid, nr_class)
+                action = self.moves.c[guess]
+                action.do(state, action.label)
+                state.push_hist(guess)
+                if not state.is_final():
+                    unfinished.push_back(state)
+            for i in range(unfinished.size()):
+                states[i] = unfinished[i]
+            nr_todo = unfinished.size()
+            unfinished.clear()
         free(token_ids)
         free(is_valid)
         free(vectors)
+        free(unmaxed)
         free(scores)
 
     def beam_parse(self, docs, int beam_width=3, float beam_density=0.001,
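
Putting the main hunk together: instead of driving each state through its own while loop, the new _parseC keeps a worklist of unfinished states, builds one (nr_todo x nr_hidden) matrix of maxed hidden activations per step, scores the whole batch with a single gemm plus a bias axpy, applies the argmax-valid action to every state, and shrinks the worklist. A rough NumPy sketch of one step of that loop (featurize, set_valid and apply_action are stand-ins for sum_state_features plus bias, moves.set_valid, and action.do/push_hist; shapes are illustrative):

    import numpy as np

    def parse_step_sketch(states, hW, hb, nr_hidden, nr_piece,
                          featurize, set_valid, apply_action):
        # One iteration of the batched while-loop added in this commit.
        nr_todo = len(states)
        vectors = np.zeros((nr_todo, nr_hidden), dtype="float32")
        for i, state in enumerate(states):
            # Per-state feature sum plus bias, then maxout over nr_piece pieces.
            unmaxed = featurize(state).reshape(nr_hidden, nr_piece)
            vectors[i] = unmaxed.max(axis=1)
        # Hidden-to-output for every unfinished state in one matrix multiply
        # (the simple_gemm call), then the per-row output bias (simple_axpy).
        scores = vectors @ hW + hb
        # Mask invalid transitions, take the best action, advance each state.
        for i, state in enumerate(states):
            valid = set_valid(state)   # boolean mask over the classes
            best = int(np.where(valid, scores[i], -np.inf).argmax())
            apply_action(state, best)
        # Keep only states that are not yet final (the `unfinished` vector).
        return [s for s in states if not s.is_final()]
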