2015-03-26 02:16:40 +00:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
from collections import defaultdict
|
2015-09-14 07:49:58 +00:00
|
|
|
import numpy
|
|
|
|
import numpy.linalg
|
|
|
|
cimport numpy as np
|
2015-09-17 01:50:11 +00:00
|
|
|
import math
|
2015-11-02 18:22:18 +00:00
|
|
|
import six
|
2015-03-26 02:16:40 +00:00
|
|
|
|
2015-08-26 17:20:46 +00:00
|
|
|
from ..structs cimport TokenC, LexemeC
|
2015-07-16 17:55:21 +00:00
|
|
|
from ..typedefs cimport flags_t, attr_t
|
|
|
|
from ..attrs cimport attr_id_t
|
2015-07-13 18:20:58 +00:00
|
|
|
from ..parts_of_speech cimport univ_pos_t
|
2015-10-07 08:25:35 +00:00
|
|
|
from ..util import normalize_slice
|
2015-07-13 18:20:58 +00:00
|
|
|
|
2015-03-26 02:16:40 +00:00
|
|
|
|
|
|
|
cdef class Span:
|
2015-07-08 16:53:00 +00:00
|
|
|
"""A slice from a Doc object."""
|
2015-09-21 06:50:40 +00:00
|
|
|
def __cinit__(self, Doc tokens, int start, int end, int label=0, vector=None,
|
|
|
|
vector_norm=None):
|
2015-10-07 08:25:35 +00:00
|
|
|
if not (0 <= start <= end <= len(tokens)):
|
|
|
|
raise IndexError
|
2015-10-06 08:59:11 +00:00
|
|
|
|
2015-09-29 13:03:55 +00:00
|
|
|
self.doc = tokens
|
2015-03-26 02:16:40 +00:00
|
|
|
self.start = start
|
|
|
|
self.end = end
|
|
|
|
self.label = label
|
2015-09-21 06:50:40 +00:00
|
|
|
self._vector = vector
|
|
|
|
self._vector_norm = vector_norm
|
2015-03-26 02:16:40 +00:00
|
|
|
|
|
|
|
def __richcmp__(self, Span other, int op):
|
|
|
|
# Eq
|
|
|
|
if op == 0:
|
|
|
|
return self.start < other.start
|
|
|
|
elif op == 1:
|
|
|
|
return self.start <= other.start
|
|
|
|
elif op == 2:
|
|
|
|
return self.start == other.start and self.end == other.end
|
|
|
|
elif op == 3:
|
|
|
|
return self.start != other.start or self.end != other.end
|
|
|
|
elif op == 4:
|
|
|
|
return self.start > other.start
|
|
|
|
elif op == 5:
|
|
|
|
return self.start >= other.start
|
|
|
|
|
|
|
|
def __len__(self):
|
|
|
|
if self.end < self.start:
|
|
|
|
return 0
|
|
|
|
return self.end - self.start
|
|
|
|
|
2015-10-21 11:11:46 +00:00
|
|
|
def __repr__(self):
|
2015-11-02 18:22:18 +00:00
|
|
|
if six.PY3:
|
|
|
|
return self.text
|
|
|
|
return self.text.encode('utf-8')
|
2015-10-21 11:11:46 +00:00
|
|
|
|
2015-10-06 09:45:49 +00:00
|
|
|
def __getitem__(self, object i):
|
|
|
|
if isinstance(i, slice):
|
2015-10-07 08:25:35 +00:00
|
|
|
start, end = normalize_slice(len(self), i.start, i.stop, i.step)
|
2015-10-06 09:45:49 +00:00
|
|
|
start += self.start
|
|
|
|
end += self.start
|
2015-10-07 08:25:35 +00:00
|
|
|
return Span(self.doc, start, end)
|
2015-10-06 09:45:49 +00:00
|
|
|
|
2015-07-29 20:36:03 +00:00
|
|
|
if i < 0:
|
2015-09-29 13:03:55 +00:00
|
|
|
return self.doc[self.end + i]
|
2015-07-30 00:30:24 +00:00
|
|
|
else:
|
2015-09-29 13:03:55 +00:00
|
|
|
return self.doc[self.start + i]
|
2015-03-26 02:16:40 +00:00
|
|
|
|
|
|
|
def __iter__(self):
|
|
|
|
for i in range(self.start, self.end):
|
2015-09-29 13:03:55 +00:00
|
|
|
yield self.doc[i]
|
2015-03-26 02:16:40 +00:00
|
|
|
|
2015-07-30 00:30:24 +00:00
|
|
|
def merge(self, unicode tag, unicode lemma, unicode ent_type):
|
2015-09-29 13:03:55 +00:00
|
|
|
self.doc.merge(self[0].idx, self[-1].idx + len(self[-1]), tag, lemma, ent_type)
|
2015-07-30 00:30:24 +00:00
|
|
|
|
2015-09-14 07:49:58 +00:00
|
|
|
def similarity(self, other):
|
2015-09-22 00:10:01 +00:00
|
|
|
if self.vector_norm == 0.0 or other.vector_norm == 0.0:
|
|
|
|
return 0.0
|
2015-09-14 07:49:58 +00:00
|
|
|
return numpy.dot(self.vector, other.vector) / (self.vector_norm * other.vector_norm)
|
|
|
|
|
|
|
|
property vector:
|
|
|
|
def __get__(self):
|
2015-09-17 01:50:11 +00:00
|
|
|
if self._vector is None:
|
|
|
|
self._vector = sum(t.vector for t in self) / len(self)
|
|
|
|
return self._vector
|
|
|
|
|
2015-09-14 07:49:58 +00:00
|
|
|
property vector_norm:
|
|
|
|
def __get__(self):
|
2015-09-17 01:50:11 +00:00
|
|
|
cdef float value
|
|
|
|
if self._vector_norm is None:
|
|
|
|
self._vector_norm = 1e-20
|
|
|
|
for value in self.vector:
|
|
|
|
self._vector_norm += value * value
|
|
|
|
self._vector_norm = math.sqrt(self._vector_norm)
|
|
|
|
return self._vector_norm
|
2015-09-14 07:49:58 +00:00
|
|
|
|
2015-09-13 00:27:42 +00:00
|
|
|
property text:
|
|
|
|
def __get__(self):
|
2015-09-17 01:50:11 +00:00
|
|
|
text = self.text_with_ws
|
|
|
|
if self[-1].whitespace_:
|
|
|
|
text = text[:-1]
|
|
|
|
return text
|
2015-09-13 00:27:42 +00:00
|
|
|
|
|
|
|
property text_with_ws:
|
|
|
|
def __get__(self):
|
|
|
|
return u''.join([t.text_with_ws for t in self])
|
|
|
|
|
2015-07-09 15:30:58 +00:00
|
|
|
    property root:
        """The first ancestor of the first word of the span that has its head
        outside the span.

        For example:

        >>> toks = nlp(u'I like New York in Autumn.')

        Let's name the indices --- easier than writing "toks[4]" etc.

        >>> i, like, new, york, in_, autumn, dot = range(len(toks))

        The head of 'new' is 'York', and the head of 'York' is 'like'

        >>> toks[new].head.orth_
        'York'
        >>> toks[york].head.orth_
        'like'

        Create a span for "New York". Its root is "York".

        >>> new_york = toks[new:york+1]
        >>> new_york.root.orth_
        'York'

        When there are multiple words with external dependencies, we take the first:

        >>> toks[autumn].head.orth_, toks[dot].head.orth_
        ('in', 'like')
        >>> autumn_dot = toks[autumn:]
        >>> autumn_dot.root.orth_
        'Autumn'
        """
        def __get__(self):
            # This should probably be called 'head', and the other one called
            # 'gov'. But we went with 'head' elsewhere, and now we're stuck =/
            # Pointers bracketing the span within the Doc's TokenC array.
            cdef const TokenC* start = &self.doc.c[self.start]
            cdef const TokenC* end = &self.doc.c[self.end]
            head = start
            # TokenC.head appears to store the head as a *relative* token
            # offset (presumably; the pointer arithmetic below depends on it
            # — confirm against structs.pxd). Climb the dependency tree while
            # the current token's head still lies inside [start, end) and
            # we haven't reached a self-headed (root) token.
            while start <= (head + head.head) < end and head.head != 0:
                head += head.head
            # Pointer difference converts back to a token index in the Doc.
            return self.doc[head - self.doc.c]
|
2015-05-13 19:45:19 +00:00
|
|
|
|
|
|
|
property lefts:
|
|
|
|
"""Tokens that are to the left of the Span, whose head is within the Span."""
|
|
|
|
def __get__(self):
|
|
|
|
for token in reversed(self): # Reverse, so we get the tokens in order
|
|
|
|
for left in token.lefts:
|
|
|
|
if left.i < self.start:
|
|
|
|
yield left
|
|
|
|
|
2015-07-11 20:15:04 +00:00
|
|
|
property rights:
|
2015-05-13 19:45:19 +00:00
|
|
|
"""Tokens that are to the right of the Span, whose head is within the Span."""
|
|
|
|
def __get__(self):
|
|
|
|
for token in self:
|
|
|
|
for right in token.rights:
|
|
|
|
if right.i >= self.end:
|
|
|
|
yield right
|
|
|
|
|
2015-07-09 15:30:58 +00:00
|
|
|
property subtree:
|
|
|
|
def __get__(self):
|
|
|
|
for word in self.lefts:
|
|
|
|
yield from word.subtree
|
|
|
|
yield from self
|
|
|
|
for word in self.rights:
|
|
|
|
yield from word.subtree
|
|
|
|
|
2015-03-26 02:16:40 +00:00
|
|
|
property orth_:
|
|
|
|
def __get__(self):
|
2015-04-07 02:53:40 +00:00
|
|
|
return ''.join([t.string for t in self]).strip()
|
2015-03-26 02:16:40 +00:00
|
|
|
|
|
|
|
property lemma_:
|
|
|
|
def __get__(self):
|
2015-03-26 02:45:11 +00:00
|
|
|
return ' '.join([t.lemma_ for t in self]).strip()
|
2015-03-26 02:16:40 +00:00
|
|
|
|
2015-03-27 16:40:52 +00:00
|
|
|
property string:
|
|
|
|
def __get__(self):
|
|
|
|
return ''.join([t.string for t in self])
|
|
|
|
|
2015-03-26 02:16:40 +00:00
|
|
|
property label_:
|
|
|
|
def __get__(self):
|
2015-09-29 13:03:55 +00:00
|
|
|
return self.doc.vocab.strings[self.label]
|
2015-03-26 02:16:40 +00:00
|
|
|
|