# coding: utf-8
from __future__ import unicode_literals

import re

import pytest

from spacy.matcher import Matcher, DependencyMatcher
from spacy.tokens import Doc, Token


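# Shared fixture: a Matcher with three simple rules ("JS", "GoogleNow", "Java"),
# reused by most of the Matcher tests below.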
@pytest.fixture
def matcher(en_vocab):
    rules = {
        "JS": [[{"ORTH": "JavaScript"}]],
        "GoogleNow": [[{"ORTH": "Google"}, {"ORTH": "Now"}]],
        "Java": [[{"LOWER": "java"}]],
    }
    matcher = Matcher(en_vocab)
    for key, patterns in rules.items():
        matcher.add(key, None, *patterns)
    return matcher


def test_matcher_from_api_docs(en_vocab):
    matcher = Matcher(en_vocab)
    pattern = [{"ORTH": "test"}]
    assert len(matcher) == 0
    matcher.add("Rule", None, pattern)
    assert len(matcher) == 1
    matcher.remove("Rule")
    assert "Rule" not in matcher
    matcher.add("Rule", None, pattern)
    assert "Rule" in matcher
    on_match, patterns = matcher.get("Rule")
    assert len(patterns[0])


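# The on_match callback receives (matcher, doc, i, matches) and may mutate the doc.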
def test_matcher_from_usage_docs(en_vocab):
    text = "Wow 😀 This is really cool! 😂 😂"
    doc = Doc(en_vocab, words=text.split(" "))
    pos_emoji = ["😀", "😃", "😂", "🤣", "😊", "😍"]
    pos_patterns = [[{"ORTH": emoji}] for emoji in pos_emoji]

    def label_sentiment(matcher, doc, i, matches):
        match_id, start, end = matches[i]
        if doc.vocab.strings[match_id] == "HAPPY":
            doc.sentiment += 0.1
        span = doc[start:end]
        with doc.retokenize() as retokenizer:
            retokenizer.merge(span)
        token = doc[start]
        token.vocab[token.text].norm_ = "happy emoji"

    matcher = Matcher(en_vocab)
    matcher.add("HAPPY", label_sentiment, *pos_patterns)
    matcher(doc)
    assert doc.sentiment != 0
    assert doc[1].norm_ == "happy emoji"


def test_matcher_len_contains(matcher):
    assert len(matcher) == 3
    matcher.add("TEST", None, [{"ORTH": "test"}])
    assert "TEST" in matcher
    assert "TEST2" not in matcher


def test_matcher_no_match(matcher):
    doc = Doc(matcher.vocab, words=["I", "like", "cheese", "."])
    assert matcher(doc) == []


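# Matches are returned as (match_id, start, end) tuples over doc token indices.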
def test_matcher_match_start(matcher):
    doc = Doc(matcher.vocab, words=["JavaScript", "is", "good"])
    assert matcher(doc) == [(matcher.vocab.strings["JS"], 0, 1)]


def test_matcher_match_end(matcher):
    words = ["I", "like", "java"]
    doc = Doc(matcher.vocab, words=words)
    assert matcher(doc) == [(doc.vocab.strings["Java"], 2, 3)]


def test_matcher_match_middle(matcher):
    words = ["I", "like", "Google", "Now", "best"]
    doc = Doc(matcher.vocab, words=words)
    assert matcher(doc) == [(doc.vocab.strings["GoogleNow"], 2, 4)]


def test_matcher_match_multi(matcher):
    words = ["I", "like", "Google", "Now", "and", "java", "best"]
    doc = Doc(matcher.vocab, words=words)
    assert matcher(doc) == [
        (doc.vocab.strings["GoogleNow"], 2, 4),
        (doc.vocab.strings["Java"], 5, 6),
    ]


def test_matcher_empty_dict(en_vocab):
    """Test matcher allows empty token specs, meaning match on any token."""
    matcher = Matcher(en_vocab)
    doc = Doc(matcher.vocab, words=["a", "b", "c"])
    matcher.add("A.C", None, [{"ORTH": "a"}, {}, {"ORTH": "c"}])
    matches = matcher(doc)
    assert len(matches) == 1
    assert matches[0][1:] == (0, 3)
    matcher = Matcher(en_vocab)
    matcher.add("A.", None, [{"ORTH": "a"}, {}])
    matches = matcher(doc)
    assert matches[0][1:] == (0, 2)


def test_matcher_operator_shadow(en_vocab):
    matcher = Matcher(en_vocab)
    doc = Doc(matcher.vocab, words=["a", "b", "c"])
    pattern = [{"ORTH": "a"}, {"IS_ALPHA": True, "OP": "+"}, {"ORTH": "c"}]
    matcher.add("A.C", None, pattern)
    matches = matcher(doc)
    assert len(matches) == 1
    assert matches[0][1:] == (0, 3)


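# "OP": "!" negates a token pattern, requiring it to match exactly zero times.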
def test_matcher_match_zero(matcher):
    words1 = 'He said , " some words " ...'.split()
    words2 = 'He said , " some three words " ...'.split()
    pattern1 = [
        {"ORTH": '"'},
        {"OP": "!", "IS_PUNCT": True},
        {"OP": "!", "IS_PUNCT": True},
        {"ORTH": '"'},
    ]
    pattern2 = [
        {"ORTH": '"'},
        {"IS_PUNCT": True},
        {"IS_PUNCT": True},
        {"IS_PUNCT": True},
        {"ORTH": '"'},
    ]
    matcher.add("Quote", None, pattern1)
    doc = Doc(matcher.vocab, words=words1)
    assert len(matcher(doc)) == 1
    doc = Doc(matcher.vocab, words=words2)
    assert len(matcher(doc)) == 0
    matcher.add("Quote", None, pattern2)
    assert len(matcher(doc)) == 0


def test_matcher_match_zero_plus(matcher):
    words = 'He said , " some words " ...'.split()
    pattern = [{"ORTH": '"'}, {"OP": "*", "IS_PUNCT": False}, {"ORTH": '"'}]
    matcher = Matcher(matcher.vocab)
    matcher.add("Quote", None, pattern)
    doc = Doc(matcher.vocab, words=words)
    assert len(matcher(doc)) == 1


def test_matcher_match_one_plus(matcher):
    control = Matcher(matcher.vocab)
    control.add("BasicPhilippe", None, [{"ORTH": "Philippe"}])
    doc = Doc(control.vocab, words=["Philippe", "Philippe"])
    m = control(doc)
    assert len(m) == 2
    matcher.add(
        "KleenePhilippe",
        None,
        [{"ORTH": "Philippe", "OP": "1"}, {"ORTH": "Philippe", "OP": "+"}],
    )
    m = matcher(doc)
    assert len(m) == 1


def test_matcher_any_token_operator(en_vocab):
    """Test that patterns with "any token" {} work with operators."""
    matcher = Matcher(en_vocab)
    matcher.add("TEST", None, [{"ORTH": "test"}, {"OP": "*"}])
    doc = Doc(en_vocab, words=["test", "hello", "world"])
    matches = [doc[start:end].text for _, start, end in matcher(doc)]
    assert len(matches) == 3
    assert matches[0] == "test"
    assert matches[1] == "test hello"
    assert matches[2] == "test hello world"


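# Custom extension attributes registered via Token.set_extension are matched
# under the "_" key of a token pattern.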
def test_matcher_extension_attribute(en_vocab):
    matcher = Matcher(en_vocab)
    get_is_fruit = lambda token: token.text in ("apple", "banana")
    Token.set_extension("is_fruit", getter=get_is_fruit, force=True)
    pattern = [{"ORTH": "an"}, {"_": {"is_fruit": True}}]
    matcher.add("HAVING_FRUIT", None, pattern)
    doc = Doc(en_vocab, words=["an", "apple"])
    matches = matcher(doc)
    assert len(matches) == 1
    doc = Doc(en_vocab, words=["an", "aardvark"])
    matches = matcher(doc)
    assert len(matches) == 0


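# Rich value patterns (spaCy v2.1+): an attribute value may itself be a dict
# using set membership ("IN"/"NOT_IN"), "REGEX", or comparisons such as ">=".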
def test_matcher_set_value(en_vocab):
    matcher = Matcher(en_vocab)
    pattern = [{"ORTH": {"IN": ["an", "a"]}}]
    matcher.add("A_OR_AN", None, pattern)
    doc = Doc(en_vocab, words=["an", "a", "apple"])
    matches = matcher(doc)
    assert len(matches) == 2
    doc = Doc(en_vocab, words=["aardvark"])
    matches = matcher(doc)
    assert len(matches) == 0


def test_matcher_set_value_operator(en_vocab):
    matcher = Matcher(en_vocab)
    pattern = [{"ORTH": {"IN": ["a", "the"]}, "OP": "?"}, {"ORTH": "house"}]
    matcher.add("DET_HOUSE", None, pattern)
    doc = Doc(en_vocab, words=["In", "a", "house"])
    matches = matcher(doc)
    assert len(matches) == 2
    doc = Doc(en_vocab, words=["my", "house"])
    matches = matcher(doc)
    assert len(matches) == 1


def test_matcher_regex(en_vocab):
    matcher = Matcher(en_vocab)
    pattern = [{"ORTH": {"REGEX": r"(?:a|an)"}}]
    matcher.add("A_OR_AN", None, pattern)
    doc = Doc(en_vocab, words=["an", "a", "hi"])
    matches = matcher(doc)
    assert len(matches) == 2
    doc = Doc(en_vocab, words=["bye"])
    matches = matcher(doc)
    assert len(matches) == 0


def test_matcher_regex_shape(en_vocab):
    matcher = Matcher(en_vocab)
    pattern = [{"SHAPE": {"REGEX": r"^[^x]+$"}}]
    matcher.add("NON_ALPHA", None, pattern)
    doc = Doc(en_vocab, words=["99", "problems", "!"])
    matches = matcher(doc)
    assert len(matches) == 2
    doc = Doc(en_vocab, words=["bye"])
    matches = matcher(doc)
    assert len(matches) == 0


def test_matcher_compare_length(en_vocab):
    matcher = Matcher(en_vocab)
    pattern = [{"LENGTH": {">=": 2}}]
    matcher.add("LENGTH_COMPARE", None, pattern)
    doc = Doc(en_vocab, words=["a", "aa", "aaa"])
    matches = matcher(doc)
    assert len(matches) == 2
    doc = Doc(en_vocab, words=["a"])
    matches = matcher(doc)
    assert len(matches) == 0


def test_matcher_extension_set_membership(en_vocab):
    matcher = Matcher(en_vocab)
    get_reversed = lambda token: "".join(reversed(token.text))
    Token.set_extension("reversed", getter=get_reversed, force=True)
    pattern = [{"_": {"reversed": {"IN": ["eyb", "ih"]}}}]
    matcher.add("REVERSED", None, pattern)
    doc = Doc(en_vocab, words=["hi", "bye", "hello"])
    matches = matcher(doc)
    assert len(matches) == 2
    doc = Doc(en_vocab, words=["aardvark"])
    matches = matcher(doc)
    assert len(matches) == 0


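# Fixtures for the DependencyMatcher tests below: a sentence, its relative
# head offsets, and its dependency labels.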
@pytest.fixture
def text():
    return "The quick brown fox jumped over the lazy fox"


@pytest.fixture
def heads():
    return [3, 2, 1, 1, 0, -1, 2, 1, -3]


@pytest.fixture
def deps():
    return ["det", "amod", "amod", "nsubj", "prep", "pobj", "det", "amod"]


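# A DependencyMatcher pattern is a list of nodes: each node's "SPEC" names it
# ("NODE_NAME") and relates it to an existing node via a Semgrex-style operator
# ("NBOR_RELOP": ">", ">>", "."), while "PATTERN" is an ordinary token pattern.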
@pytest.fixture
def dependency_matcher(en_vocab):
    def is_brown_yellow(text):
        return bool(re.compile(r"brown|yellow|over").match(text))

    IS_BROWN_YELLOW = en_vocab.add_flag(is_brown_yellow)

    pattern1 = [
        {"SPEC": {"NODE_NAME": "fox"}, "PATTERN": {"ORTH": "fox"}},
        {
            "SPEC": {"NODE_NAME": "q", "NBOR_RELOP": ">", "NBOR_NAME": "fox"},
            "PATTERN": {"ORTH": "quick", "DEP": "amod"},
        },
        {
            "SPEC": {"NODE_NAME": "r", "NBOR_RELOP": ">", "NBOR_NAME": "fox"},
            "PATTERN": {IS_BROWN_YELLOW: True},
        },
    ]

    pattern2 = [
        {"SPEC": {"NODE_NAME": "jumped"}, "PATTERN": {"ORTH": "jumped"}},
        {
            "SPEC": {"NODE_NAME": "fox", "NBOR_RELOP": ">", "NBOR_NAME": "jumped"},
            "PATTERN": {"ORTH": "fox"},
        },
        {
            "SPEC": {"NODE_NAME": "quick", "NBOR_RELOP": ".", "NBOR_NAME": "jumped"},
            "PATTERN": {"ORTH": "fox"},
        },
    ]

    pattern3 = [
        {"SPEC": {"NODE_NAME": "jumped"}, "PATTERN": {"ORTH": "jumped"}},
        {
            "SPEC": {"NODE_NAME": "fox", "NBOR_RELOP": ">", "NBOR_NAME": "jumped"},
            "PATTERN": {"ORTH": "fox"},
        },
        {
            "SPEC": {"NODE_NAME": "r", "NBOR_RELOP": ">>", "NBOR_NAME": "fox"},
            "PATTERN": {"ORTH": "brown"},
        },
    ]

    matcher = DependencyMatcher(en_vocab)
    matcher.add("pattern1", None, pattern1)
    matcher.add("pattern2", None, pattern2)
    matcher.add("pattern3", None, pattern3)

    return matcher


def test_dependency_matcher_compile(dependency_matcher):
    assert len(dependency_matcher) == 3


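# Disabled: this test relies on a get_doc test helper (not imported here) to
# build a parsed Doc from the text/heads/deps fixtures above.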
# def test_dependency_matcher(dependency_matcher, text, heads, deps):
#     doc = get_doc(dependency_matcher.vocab, text.split(), heads=heads, deps=deps)
#     matches = dependency_matcher(doc)
#     assert matches[0][1] == [[3, 1, 2]]
#     assert matches[1][1] == [[4, 3, 3]]
#     assert matches[2][1] == [[4, 3, 2]]


def test_attr_pipeline_checks(en_vocab):
    doc1 = Doc(en_vocab, words=["Test"])
    doc1.is_parsed = True
    doc2 = Doc(en_vocab, words=["Test"])
    doc2.is_tagged = True
    doc3 = Doc(en_vocab, words=["Test"])
    # DEP requires is_parsed
    matcher = Matcher(en_vocab)
    matcher.add("TEST", None, [{"DEP": "a"}])
    matcher(doc1)
    with pytest.raises(ValueError):
        matcher(doc2)
    with pytest.raises(ValueError):
        matcher(doc3)
    # TAG, POS, LEMMA require is_tagged
    for attr in ("TAG", "POS", "LEMMA"):
        matcher = Matcher(en_vocab)
        matcher.add("TEST", None, [{attr: "a"}])
        matcher(doc2)
        with pytest.raises(ValueError):
            matcher(doc1)
        with pytest.raises(ValueError):
            matcher(doc3)
    # TEXT/ORTH only require tokens
    matcher = Matcher(en_vocab)
    matcher.add("TEST", None, [{"ORTH": "a"}])
    matcher(doc1)
    matcher(doc2)
    matcher(doc3)
    matcher = Matcher(en_vocab)
    matcher.add("TEST", None, [{"TEXT": "a"}])
    matcher(doc1)
    matcher(doc2)
    matcher(doc3)


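# Each boolean token flag below should match its example text as a single token.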
@pytest.mark.parametrize(
    "pattern,text",
    [
        ([{"IS_ALPHA": True}], "a"),
        ([{"IS_ASCII": True}], "a"),
        ([{"IS_DIGIT": True}], "1"),
        ([{"IS_LOWER": True}], "a"),
        ([{"IS_UPPER": True}], "A"),
        ([{"IS_TITLE": True}], "Aaaa"),
        ([{"IS_PUNCT": True}], "."),
        ([{"IS_SPACE": True}], "\n"),
        ([{"IS_BRACKET": True}], "["),
        ([{"IS_QUOTE": True}], '"'),
        ([{"IS_LEFT_PUNCT": True}], "``"),
        ([{"IS_RIGHT_PUNCT": True}], "''"),
        ([{"IS_STOP": True}], "the"),
        ([{"LIKE_NUM": True}], "1"),
        ([{"LIKE_URL": True}], "http://example.com"),
        ([{"LIKE_EMAIL": True}], "mail@example.com"),
    ],
)
def test_matcher_schema_token_attributes(en_vocab, pattern, text):
    matcher = Matcher(en_vocab)
    doc = Doc(en_vocab, words=text.split(" "))
    matcher.add("Rule", None, pattern)
    assert len(matcher) == 1
    matches = matcher(doc)
    assert len(matches) == 1


def test_matcher_valid_callback(en_vocab):
    """Test that on_match can only be None or callable."""
    matcher = Matcher(en_vocab)
    with pytest.raises(ValueError):
        matcher.add("TEST", [], [{"TEXT": "test"}])
    matcher(Doc(en_vocab, words=["test"]))