2017-01-10 18:24:10 +00:00
|
|
|
# coding: utf-8
|
2016-10-27 16:01:34 +00:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2017-01-12 21:00:37 +00:00
|
|
|
from ...matcher import Matcher
|
2016-10-27 16:01:34 +00:00
|
|
|
|
2017-01-10 18:24:10 +00:00
|
|
|
import pytest
|
|
|
|
|
2016-10-27 16:01:34 +00:00
|
|
|
|
|
|
|
@pytest.mark.models
def test_issue429(EN):
    """Regression test for issue #429: merging spans from a Matcher
    on_match callback must leave the Doc in a state the tagger and
    entity recognizer can still process without crashing.
    """
    def merge_phrases(matcher, doc, i, matches):
        # Only merge once all matches have been collected (i.e. when the
        # callback fires for the last match), so earlier merges don't
        # invalidate the start/end offsets of later matches.
        if i != len(matches) - 1:
            return None
        # Each match is (ent_id, start, end); the ent_id doubles as the label.
        spans = [(ent_id, ent_id, doc[start:end]) for ent_id, start, end in matches]
        for ent_id, label, span in spans:
            span.merge('NNP' if label else span.root.tag_, span.text,
                       EN.vocab.strings[label])

    # Run the pipeline once so the vocab/strings are primed.
    doc = EN('a')
    matcher = Matcher(EN.vocab)
    # BUG FIX: the pattern list was passed *after* the keyword argument
    # `on_match=`, which is a SyntaxError. Matcher.add takes the on_match
    # callback positionally, followed by one or more patterns.
    matcher.add('TEST', merge_phrases, [{'ORTH': 'a'}])
    doc = EN.tokenizer('a b c')
    EN.tagger(doc)
    matcher(doc)
    # Must not raise even though the matched span was merged in place.
    EN.entity(doc)
|