Fix Doc.copy bugs (#6809)

* Don't let the Doc own LexemeC, to fix Doc.copy

* Copy doc.spans

Matthew Honnibal 2021-01-26 00:40:18 +11:00 committed by GitHub
parent 0f2de39efb
commit 42b117e561
3 changed files with 14 additions and 2 deletions

@@ -33,6 +33,9 @@ class SpanGroups(UserDict):
def _make_span_group(self, name: str, spans: Iterable["Span"]) -> SpanGroup:
return SpanGroup(self.doc_ref(), name=name, spans=spans)
def copy(self) -> "SpanGroups":
return SpanGroups(self.doc_ref()).from_bytes(self.to_bytes())
def to_bytes(self) -> bytes:
# We don't need to serialize this as a dict, because the groups
# know their names.
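A minimal usage sketch of the copy() method added above, assuming a spaCy 3.x install; the blank pipeline and the group name "sents" are purely illustrative:

    import spacy

    nlp = spacy.blank("en")
    doc = nlp("SpanGroups copy via a serialization round trip")
    doc.spans["sents"] = [doc[0:2]]

    # copy() round-trips the groups through to_bytes()/from_bytes(),
    # re-binding them to the same parent Doc.
    groups_copy = doc.spans.copy()
    assert [span.text for span in groups_copy["sents"]] == ["SpanGroups copy"]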

@@ -1187,6 +1187,7 @@ cdef class Doc:
other.user_span_hooks = dict(self.user_span_hooks)
other.length = self.length
other.max_length = self.max_length
other.spans = self.spans.copy()
buff_size = other.max_length + (PADDING*2)
assert buff_size > 0
tokens = <TokenC*>other.mem.alloc(buff_size, sizeof(TokenC))
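With the added other.spans = self.spans.copy() line, span groups now survive Doc.copy(). A hedged sketch of the expected behaviour, again with a blank pipeline and an illustrative group name:

    import spacy

    nlp = spacy.blank("en")
    doc = nlp("Copying a Doc should keep its span groups")
    doc.spans["errors"] = [doc[0:2], doc[2:3]]

    copied = doc.copy()
    # The copy carries its own SpanGroups, re-bound to the new Doc.
    assert [span.text for span in copied.spans["errors"]] == ["Copying a", "Doc"]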

@@ -161,8 +161,16 @@ cdef class Vocab:
return self._new_lexeme(mem, self.strings[orth])
cdef const LexemeC* _new_lexeme(self, Pool mem, unicode string) except NULL:
if len(string) < 3 or self.length < 10000:
# I think this heuristic is bad, and the Vocab should always
# own the lexemes. It avoids weird bugs this way, as it's how the thing
# was originally supposed to work. The best solution to the growing
# memory use is to periodically reset the vocab, which is an action
# that should be up to the user to do (so we don't need to keep track
# of the doc ownership).
# TODO: Change the C API so that the mem isn't passed in here.
mem = self.mem
#if len(string) < 3 or self.length < 10000:
# mem = self.mem
cdef bint is_oov = mem is not self.mem
lex = <LexemeC*>mem.alloc(1, sizeof(LexemeC))
lex.orth = self.strings.add(string)
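The practical effect of the ownership change, sketched below under the assumption of a blank English pipeline: because the Vocab, rather than the Doc's memory pool, now allocates every LexemeC, lexical attributes on a copied Doc stay valid after the original Doc is freed.

    import gc
    import spacy

    nlp = spacy.blank("en")
    doc = nlp("pneumonoultramicroscopic strings exercise the lexeme allocation path")
    copied = doc.copy()

    del doc
    gc.collect()

    # Lexeme data is owned by nlp.vocab, so the copy does not point at freed memory.
    print([(token.text, token.lower_) for token in copied])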