from spacy.matcher import PhraseMatcher
from spacy.tokens import Doc


def test_issue3972(en_vocab):
    """Test that the PhraseMatcher returns duplicates for duplicate match IDs."""
    matcher = PhraseMatcher(en_vocab)
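    # The same pattern is registered under two different match IDs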
    matcher.add("A", [Doc(en_vocab, words=["New", "York"])])
    matcher.add("B", [Doc(en_vocab, words=["New", "York"])])
    doc = Doc(en_vocab, words=["I", "live", "in", "New", "York"])
    matches = matcher(doc)
    assert len(matches) == 2
    # We should have a match for each of the two rules
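    # Each match is a (match_id, start, end) tuple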
    found_ids = [en_vocab.strings[ent_id] for (ent_id, _, _) in matches]
    assert "A" in found_ids
    assert "B" in found_ids