from cymem.cymem cimport Pool
cimport numpy as np
from preshed.counter cimport PreshCounter

from ..vocab cimport Vocab
from ..structs cimport TokenC, LexemeC
from ..typedefs cimport attr_t
from ..attrs cimport attr_id_t

# Look up a single attribute value (e.g. a lemma or POS id) on a token.
# Declared nogil so it can be called from GIL-free code.
cdef attr_t get_token_attr(const TokenC* token, attr_id_t feat_name) nogil
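# A minimal calling sketch (Cython level; assumes LEMMA has been
# cimported from ..attrs, `doc` is a Doc, and `i` is a token index):
#
#     cdef attr_t value = get_token_attr(&doc.c[i], LEMMA)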

ctypedef const LexemeC* const_Lexeme_ptr
ctypedef const TokenC* const_TokenC_ptr


ctypedef fused LexemeOrToken:
    const_Lexeme_ptr
    const_TokenC_ptr
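# The fused type makes Cython compile a specialisation of Doc.push_back
# (declared below) for each pointer type, so a Doc can be filled either
# from vocabulary lexemes or from the tokens of another Doc. A hedged
# sketch, assuming Vocab.get's (Pool, unicode) signature:
#
#     lex = vocab.get(doc.mem, u"hello")   # const LexemeC*
#     doc.push_back(lex, True)             # has_space=True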


# Set each token's left/right child counts and edges from the head
# offsets stored on the tokens (`except -1` propagates exceptions).
cdef int set_children_from_heads(TokenC* tokens, int length) except -1

# Return the index of the token that starts at `start_char`, or -1 if
# no token starts at that character offset.
cdef int token_by_start(const TokenC* tokens, int length, int start_char) except -2

# Return the index of the token that ends at `end_char`, or -1 if no
# token ends at that character offset.
cdef int token_by_end(const TokenC* tokens, int length, int end_char) except -2
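# Offset-lookup sketch (character offsets are illustrative; both
# helpers return -1 when no token boundary falls exactly on the offset):
#
#     cdef int i = token_by_start(doc.c, doc.length, 0)
#     cdef int j = token_by_end(doc.c, doc.length, 5)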

# From the commit that added the doc.retokenize() context manager
# (#2172), a step towards #1487: the context manager handles merging
# spans, and will soon handle splitting tokens. The idea is to do
# merging and splitting like this:
#
#     with doc.retokenize() as retokenizer:
#         for start, end, label in matches:
#             retokenizer.merge(doc[start:end], attrs={'ent_type': label})
#
# The retokenizer accumulates the merge requests and applies them
# together at the end of the block, which makes retokenization more
# efficient and much less error prone. A retokenizer.split() function
# will then be added to handle splitting a single token into multiple
# tokens. These methods take Span and Token objects; users who want to
# work directly from offsets can append to the .merges and .splits
# lists on the retokenizer.
#
# The doc.merge() method's behaviour remains unchanged, so the patch
# is 100% backwards compatible (modulo bugs): internally, doc.merge()
# fixes up the arguments (handling the various deprecated call styles),
# opens the retokenizer, and makes the single merge. Deprecation
# warnings on direct calls to doc.merge() can be added later, to
# migrate people to the retokenize context manager.
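# A fuller usage sketch (Python level; the model name, pattern, and
# entity label are hypothetical):
#
#     import spacy
#     from spacy.matcher import PhraseMatcher
#
#     nlp = spacy.load('en_core_web_sm')
#     doc = nlp(u"I live in New York")
#     matcher = PhraseMatcher(nlp.vocab)
#     matcher.add('CITY', None, nlp(u"New York"))
#     with doc.retokenize() as retokenizer:
#         for match_id, start, end in matcher(doc):
#             retokenizer.merge(doc[start:end], attrs={'ent_type': 'GPE'})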

cdef class Doc:
    # Memory pool for the Doc's C data, and the shared vocabulary.
    cdef readonly Pool mem
    cdef readonly Vocab vocab

    cdef public object _vector
    cdef public object _vector_norm

    cdef public object tensor
    cdef public object cats
    cdef public object user_data

    # The underlying array of token structs.
    cdef TokenC* c

    cdef public bint is_tagged
    cdef public bint is_parsed

    cdef public float sentiment

    # Per-Doc overrides for Doc, Token and Span methods.
    cdef public dict user_hooks
    cdef public dict user_token_hooks
    cdef public dict user_span_hooks

    cdef public list _py_tokens

    cdef int length
    cdef int max_length

    cdef public object noun_chunks_iterator

    cdef object __weakref__

    # Append a lexeme or token to the Doc, growing the buffer if needed.
    cdef int push_back(self, LexemeOrToken lex_or_tok, bint has_space) except -1

    # Export the given attributes as a numpy array of shape
    # (len(doc), len(features)).
    cpdef np.ndarray to_array(self, object features)

    # Copy a completed parse into the Doc's token array.
    cdef void set_parse(self, const TokenC* parsed) nogil
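
# End-to-end sketch of the Python-level API declared above (the words
# and attribute choice are illustrative):
#
#     from spacy.vocab import Vocab
#     from spacy.tokens import Doc
#     from spacy.attrs import LOWER
#
#     doc = Doc(Vocab(), words=[u"hello", u"world"], spaces=[True, False])
#     arr = doc.to_array([LOWER])   # numpy array, one row per token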