import pytest
import re

from spacy.lang.en import English
from spacy.matcher import Matcher
from spacy.tokens import Doc, Span

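# pattern1-5 are token-level Matcher patterns over single-character tokens,
# re_pattern1-5 are the equivalent regular expressions over the raw string, and
# longest1-5 give the text of the expected "LONGEST" match in the shared fixture doc.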
pattern1 = [{"ORTH": "A"}, {"ORTH": "A", "OP": "*"}]
pattern2 = [{"ORTH": "A", "OP": "*"}, {"ORTH": "A"}]
pattern3 = [{"ORTH": "A"}, {"ORTH": "A"}]
pattern4 = [{"ORTH": "B"}, {"ORTH": "A", "OP": "*"}, {"ORTH": "B"}]
pattern5 = [{"ORTH": "B", "OP": "*"}, {"ORTH": "A", "OP": "*"}, {"ORTH": "B"}]

re_pattern1 = "AA*"
re_pattern2 = "A*A"
re_pattern3 = "AA"
re_pattern4 = "BA*B"
re_pattern5 = "B*A*B"

longest1 = "A A A A A"
longest2 = "A A A A A"
longest3 = "A A"
longest4 = "B A A A A A B"  # "FIRST" would be "B B"
longest5 = "B B A A A A A B"


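# The doc fixture space-joins the raw text so every character becomes its own
# token; character offsets from re.finditer over the raw text then line up
# one-to-one with token indices in the doc.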
@pytest.fixture
def text():
    return "(BBAAAAAB)."


@pytest.fixture
def doc(en_tokenizer, text):
    doc = en_tokenizer(" ".join(text))
    return doc


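# greedy="FIRST" filters overlapping matches down to the earliest-starting
# candidate, which is expected to line up with the spans that re.finditer
# produces while scanning the raw text left to right.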
@pytest.mark.parametrize(
    "pattern,re_pattern",
    [
        (pattern1, re_pattern1),
        (pattern2, re_pattern2),
        (pattern3, re_pattern3),
        (pattern4, re_pattern4),
        (pattern5, re_pattern5),
    ],
)
def test_greedy_matching_first(doc, text, pattern, re_pattern):
    """Test that the greedy matching behavior "FIRST" is consistent with
    other re implementations."""
    matcher = Matcher(doc.vocab)
    matcher.add(re_pattern, [pattern], greedy="FIRST")
    matches = matcher(doc)
    re_matches = [m.span() for m in re.finditer(re_pattern, text)]
    for (key, m_s, m_e), (re_s, re_e) in zip(matches, re_matches):
        # matching the string, not the exact position
        assert doc[m_s:m_e].text == doc[re_s:re_e].text


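# greedy="LONGEST" instead keeps the longest non-overlapping candidates, so each
# surviving match over the fixture doc should equal the precomputed longestN text.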
@pytest.mark.parametrize(
    "pattern,longest",
    [
        (pattern1, longest1),
        (pattern2, longest2),
        (pattern3, longest3),
        (pattern4, longest4),
        (pattern5, longest5),
    ],
)
def test_greedy_matching_longest(doc, text, pattern, longest):
    """Test the "LONGEST" greedy matching behavior"""
    matcher = Matcher(doc.vocab)
    matcher.add("RULE", [pattern], greedy="LONGEST")
    matches = matcher(doc)
    for (key, s, e) in matches:
        assert doc[s:e].text == longest


def test_greedy_matching_longest_first(en_tokenizer):
    """Test that "LONGEST" matching prefers the first of two equally long matches"""
    doc = en_tokenizer(" ".join("CCC"))
    matcher = Matcher(doc.vocab)
    pattern = [{"ORTH": "C"}, {"ORTH": "C"}]
    matcher.add("RULE", [pattern], greedy="LONGEST")
    matches = matcher(doc)
    # out of 0-2 and 1-3, the first should be picked
    assert len(matches) == 1
    assert matches[0][1] == 0
    assert matches[0][2] == 2


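# Only "FIRST" and "LONGEST" are accepted values for the greedy argument;
# anything else should raise a ValueError.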
def test_invalid_greediness(doc, text):
    matcher = Matcher(doc.vocab)
    with pytest.raises(ValueError):
        matcher.add("RULE", [pattern1], greedy="GREEDY")


@pytest.mark.parametrize(
    "pattern,re_pattern",
    [
        (pattern1, re_pattern1),
        (pattern2, re_pattern2),
        (pattern3, re_pattern3),
        (pattern4, re_pattern4),
        (pattern5, re_pattern5),
    ],
)
def test_match_consuming(doc, text, pattern, re_pattern):
    """Test that matcher.__call__ consumes tokens on a match similar to
    re.findall."""
    matcher = Matcher(doc.vocab)
    matcher.add(re_pattern, [pattern], greedy="FIRST")
    matches = matcher(doc)
    re_matches = [m.span() for m in re.finditer(re_pattern, text)]
    assert len(matches) == len(re_matches)


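# Each case is (token string, shorthand pattern, should_match): "a+" expands to
# {"ORTH": "a", "OP": "+"} and a bare letter to an exact-ORTH token pattern.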
def test_operator_combos(en_vocab):
    cases = [
        ("aaab", "a a a b", True),
        ("aaab", "a+ b", True),
        ("aaab", "a+ a+ b", True),
        ("aaab", "a+ a+ a b", True),
        ("aaab", "a+ a+ a+ b", True),
        ("aaab", "a+ a a b", True),
        ("aaab", "a+ a a", True),
        ("aaab", "a+", True),
        ("aaa", "a+ b", False),
        ("aaa", "a+ a+ b", False),
        ("aaa", "a+ a+ a+ b", False),
        ("aaa", "a+ a b", False),
        ("aaa", "a+ a a b", False),
        ("aaab", "a+ a a", True),
        ("aaab", "a+", True),
        ("aaab", "a+ a b", True),
    ]
    for string, pattern_str, result in cases:
        matcher = Matcher(en_vocab)
        doc = Doc(matcher.vocab, words=list(string))
        pattern = []
        for part in pattern_str.split():
            if part.endswith("+"):
                pattern.append({"ORTH": part[0], "OP": "+"})
            else:
                pattern.append({"ORTH": part})
        matcher.add("PATTERN", [pattern])
        matches = matcher(doc)
        if result:
            assert matches, (string, pattern_str)
        else:
            assert not matches, (string, pattern_str)


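# A trailing {"OP": "*"} token can match zero or more times, so "a b b" yields
# one match per possible end point: "a", "a b" and "a b b".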
def test_matcher_end_zero_plus(en_vocab):
    """Test matcher works when patterns end with * operator. (issue 1450)"""
    matcher = Matcher(en_vocab)
    pattern = [{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]
    matcher.add("TSTEND", [pattern])
    nlp = lambda string: Doc(matcher.vocab, words=string.split())
    assert len(matcher(nlp("a"))) == 1
    assert len(matcher(nlp("a b"))) == 2
    assert len(matcher(nlp("a c"))) == 1
    assert len(matcher(nlp("a b c"))) == 2
    assert len(matcher(nlp("a b b c"))) == 3
    assert len(matcher(nlp("a b b"))) == 3


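# The {"IN": [...]} predicate matches when the token attribute is one of the
# listed values; all three single-token patterns are registered under one key.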
def test_matcher_sets_return_correct_tokens(en_vocab):
    matcher = Matcher(en_vocab)
    patterns = [
        [{"LOWER": {"IN": ["zero"]}}],
        [{"LOWER": {"IN": ["one"]}}],
        [{"LOWER": {"IN": ["two"]}}],
    ]
    matcher.add("TEST", patterns)
    doc = Doc(en_vocab, words="zero one two three".split())
    matches = matcher(doc)
    texts = [Span(doc, s, e, label=L).text for L, s, e in matches]
    assert texts == ["zero", "one", "two"]


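# Once the rule is removed the matcher has no patterns; calling it then emits
# spaCy's W036 "no patterns" warning, which the filterwarnings mark silences.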
@pytest.mark.filterwarnings("ignore:\\[W036")
def test_matcher_remove():
    nlp = English()
    matcher = Matcher(nlp.vocab)
    text = "This is a test case."

    pattern = [{"ORTH": "test"}, {"OP": "?"}]
    assert len(matcher) == 0
    matcher.add("Rule", [pattern])
    assert "Rule" in matcher

    # should give two matches
    results1 = matcher(nlp(text))
    assert len(results1) == 2

    # removing once should work
    matcher.remove("Rule")

    # should not return any matches anymore
    results2 = matcher(nlp(text))
    assert len(results2) == 0

    # removing again should throw an error
    with pytest.raises(ValueError):
        matcher.remove("Rule")


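# with_alignments=True returns (match_id, start, end, alignments) tuples, where
# alignments[i] is the index of the pattern token matched by doc token start + i.
# With greedy="LONGEST" only one match survives per case, so matches[0] is checked.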
def test_matcher_with_alignments_greedy_longest(en_vocab):
    cases = [
        ("aaab", "a* b", [0, 0, 0, 1]),
        ("baab", "b a* b", [0, 1, 1, 2]),
        ("aaab", "a a a b", [0, 1, 2, 3]),
        ("aaab", "a+ b", [0, 0, 0, 1]),
        ("aaba", "a+ b a+", [0, 0, 1, 2]),
        ("aabaa", "a+ b a+", [0, 0, 1, 2, 2]),
        ("aaba", "a+ b a*", [0, 0, 1, 2]),
        ("aaaa", "a*", [0, 0, 0, 0]),
        ("baab", "b a* b b*", [0, 1, 1, 2]),
        ("aabb", "a* b* a*", [0, 0, 1, 1]),
        ("aaab", "a+ a+ a b", [0, 1, 2, 3]),
        ("aaab", "a+ a+ a+ b", [0, 1, 2, 3]),
        ("aaab", "a+ a a b", [0, 1, 2, 3]),
        ("aaab", "a+ a a", [0, 1, 2]),
        ("aaab", "a+ a a?", [0, 1, 2]),
        ("aaaa", "a a a a a?", [0, 1, 2, 3]),
        ("aaab", "a+ a b", [0, 0, 1, 2]),
        ("aaab", "a+ a+ b", [0, 0, 1, 2]),
    ]
    for string, pattern_str, result in cases:
        matcher = Matcher(en_vocab)
        doc = Doc(matcher.vocab, words=list(string))
        pattern = []
        for part in pattern_str.split():
            if part.endswith("+"):
                pattern.append({"ORTH": part[0], "OP": "+"})
            elif part.endswith("*"):
                pattern.append({"ORTH": part[0], "OP": "*"})
            elif part.endswith("?"):
                pattern.append({"ORTH": part[0], "OP": "?"})
            else:
                pattern.append({"ORTH": part})
        matcher.add("PATTERN", [pattern], greedy="LONGEST")
        matches = matcher(doc, with_alignments=True)
        n_matches = len(matches)

        _, s, e, expected = matches[0]

        assert expected == result, (string, pattern_str, s, e, n_matches)


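# Without a greedy filter every overlapping match is returned; each case lists
# all alignments that may legally appear, and every returned alignment must be
# one of them and cover exactly e - s tokens.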
def test_matcher_with_alignments_nongreedy(en_vocab):
    cases = [
        (0, "aaab", "a* b", [[0, 1], [0, 0, 1], [0, 0, 0, 1], [1]]),
        (1, "baab", "b a* b", [[0, 1, 1, 2]]),
        (2, "aaab", "a a a b", [[0, 1, 2, 3]]),
        (3, "aaab", "a+ b", [[0, 1], [0, 0, 1], [0, 0, 0, 1]]),
        (4, "aaba", "a+ b a+", [[0, 1, 2], [0, 0, 1, 2]]),
        (
            5,
            "aabaa",
            "a+ b a+",
            [[0, 1, 2], [0, 0, 1, 2], [0, 0, 1, 2, 2], [0, 1, 2, 2]],
        ),
        (6, "aaba", "a+ b a*", [[0, 1], [0, 0, 1], [0, 0, 1, 2], [0, 1, 2]]),
        (7, "aaaa", "a*", [[0], [0, 0], [0, 0, 0], [0, 0, 0, 0]]),
        (8, "baab", "b a* b b*", [[0, 1, 1, 2]]),
        (
            9,
            "aabb",
            "a* b* a*",
            [[1], [2], [2, 2], [0, 1], [0, 0, 1], [0, 0, 1, 1], [0, 1, 1], [1, 1]],
        ),
        (10, "aaab", "a+ a+ a b", [[0, 1, 2, 3]]),
        (11, "aaab", "a+ a+ a+ b", [[0, 1, 2, 3]]),
        (12, "aaab", "a+ a a b", [[0, 1, 2, 3]]),
        (13, "aaab", "a+ a a", [[0, 1, 2]]),
        (14, "aaab", "a+ a a?", [[0, 1], [0, 1, 2]]),
        (15, "aaaa", "a a a a a?", [[0, 1, 2, 3]]),
        (16, "aaab", "a+ a b", [[0, 1, 2], [0, 0, 1, 2]]),
        (17, "aaab", "a+ a+ b", [[0, 1, 2], [0, 0, 1, 2]]),
    ]
    for case_id, string, pattern_str, results in cases:
        matcher = Matcher(en_vocab)
        doc = Doc(matcher.vocab, words=list(string))
        pattern = []
        for part in pattern_str.split():
            if part.endswith("+"):
                pattern.append({"ORTH": part[0], "OP": "+"})
            elif part.endswith("*"):
                pattern.append({"ORTH": part[0], "OP": "*"})
            elif part.endswith("?"):
                pattern.append({"ORTH": part[0], "OP": "?"})
            else:
                pattern.append({"ORTH": part})

        matcher.add("PATTERN", [pattern])
        matches = matcher(doc, with_alignments=True)
        n_matches = len(matches)

        for _, s, e, expected in matches:
            assert expected in results, (case_id, string, pattern_str, s, e, n_matches)
            assert len(expected) == e - s