2020-09-21 18:43:54 +00:00
|
|
|
from spacy.tokens import Doc
|
2017-01-13 01:01:00 +00:00
|
|
|
|
|
|
|
|
2020-09-21 18:43:54 +00:00
|
|
|
def test_en_parser_noun_chunks_standard(en_vocab):
    """A simple det + amod + noun subject yields exactly one noun chunk."""
    tokens = ["A", "base", "phrase", "should", "be", "recognized", "."]
    token_pos = ["DET", "ADJ", "NOUN", "AUX", "VERB", "VERB", "PUNCT"]
    token_deps = ["det", "amod", "nsubjpass", "aux", "auxpass", "ROOT", "punct"]
    token_heads = [2, 2, 5, 5, 5, 5, 5]
    doc = Doc(
        en_vocab, words=tokens, pos=token_pos, deps=token_deps, heads=token_heads
    )
    noun_chunks = list(doc.noun_chunks)
    assert len(noun_chunks) == 1
    assert noun_chunks[0].text_with_ws == "A base phrase "
|
|
|
|
|
|
|
|
|
2020-09-21 18:43:54 +00:00
|
|
|
def test_en_parser_noun_chunks_coordinated(en_vocab):
    """Two noun phrases joined by a coordinating conjunction give two chunks."""
    # fmt: off
    tokens = ["A", "base", "phrase", "and", "a", "good", "phrase", "are", "often", "the", "same", "."]
    token_pos = ["DET", "NOUN", "NOUN", "CCONJ", "DET", "ADJ", "NOUN", "VERB", "ADV", "DET", "ADJ", "PUNCT"]
    token_deps = ["det", "compound", "nsubj", "cc", "det", "amod", "conj", "ROOT", "advmod", "det", "attr", "punct"]
    token_heads = [2, 2, 7, 2, 6, 6, 2, 7, 7, 10, 7, 7]
    # fmt: on
    doc = Doc(
        en_vocab, words=tokens, pos=token_pos, deps=token_deps, heads=token_heads
    )
    noun_chunks = list(doc.noun_chunks)
    assert len(noun_chunks) == 2
    assert noun_chunks[0].text_with_ws == "A base phrase "
    assert noun_chunks[1].text_with_ws == "a good phrase "
|
|
|
|
|
|
|
|
|
2020-09-21 18:43:54 +00:00
|
|
|
def test_en_parser_noun_chunks_pp_chunks(en_vocab):
    """A prepositional object is returned as its own chunk alongside the subject."""
    tokens = ["A", "phrase", "with", "another", "phrase", "occurs", "."]
    token_pos = ["DET", "NOUN", "ADP", "DET", "NOUN", "VERB", "PUNCT"]
    token_deps = ["det", "nsubj", "prep", "det", "pobj", "ROOT", "punct"]
    token_heads = [1, 5, 1, 4, 2, 5, 5]
    doc = Doc(
        en_vocab, words=tokens, pos=token_pos, deps=token_deps, heads=token_heads
    )
    noun_chunks = list(doc.noun_chunks)
    assert len(noun_chunks) == 2
    assert noun_chunks[0].text_with_ws == "A phrase "
    assert noun_chunks[1].text_with_ws == "another phrase "
|
2017-10-14 11:16:21 +00:00
|
|
|
|
|
|
|
|
2020-09-21 18:43:54 +00:00
|
|
|
def test_en_parser_noun_chunks_appositional_modifiers(en_vocab):
    """An appositive phrase is chunked separately from the noun it modifies."""
    # fmt: off
    tokens = ["Sam", ",", "my", "brother", ",", "arrived", "to", "the", "house", "."]
    token_pos = ["PROPN", "PUNCT", "DET", "NOUN", "PUNCT", "VERB", "ADP", "DET", "NOUN", "PUNCT"]
    token_deps = ["nsubj", "punct", "poss", "appos", "punct", "ROOT", "prep", "det", "pobj", "punct"]
    token_heads = [5, 0, 3, 0, 0, 5, 5, 8, 6, 5]
    # fmt: on
    doc = Doc(
        en_vocab, words=tokens, pos=token_pos, deps=token_deps, heads=token_heads
    )
    noun_chunks = list(doc.noun_chunks)
    assert len(noun_chunks) == 3
    assert noun_chunks[0].text_with_ws == "Sam "
    assert noun_chunks[1].text_with_ws == "my brother "
    assert noun_chunks[2].text_with_ws == "the house "
|
|
|
|
|
|
|
|
|
2020-09-21 18:43:54 +00:00
|
|
|
def test_en_parser_noun_chunks_dative(en_vocab):
    """A dative (indirect object) is chunked separately from the direct object."""
    tokens = ["She", "gave", "Bob", "a", "raise", "."]
    token_pos = ["PRON", "VERB", "PROPN", "DET", "NOUN", "PUNCT"]
    token_deps = ["nsubj", "ROOT", "dative", "det", "dobj", "punct"]
    token_heads = [1, 1, 1, 4, 1, 1]
    doc = Doc(
        en_vocab, words=tokens, pos=token_pos, deps=token_deps, heads=token_heads
    )
    noun_chunks = list(doc.noun_chunks)
    assert len(noun_chunks) == 3
    assert noun_chunks[0].text_with_ws == "She "
    assert noun_chunks[1].text_with_ws == "Bob "
    assert noun_chunks[2].text_with_ws == "a raise "
|