# coding: utf-8
from __future__ import unicode_literals

import pytest

from ..util import get_doc, apply_transition_sequence
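
# Conventions used by the helpers in ..util: `get_doc` takes `heads` as
# offsets relative to each token's own index (0 marks the root), and
# `apply_transition_sequence` drives the parser through named arc-eager
# moves, where "S" shifts, "D" reduces (pops the stack), and
# "L-<label>"/"R-<label>" add a left/right arc with that dependency label.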


def test_parser_root(en_tokenizer):
    text = "i don't have other assistance"
    # Head annotations are offsets relative to each token's own index (0 = root).
    heads = [3, 2, 1, 0, 1, -2]
    deps = ["nsubj", "aux", "neg", "ROOT", "amod", "dobj"]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps)
    for t in doc:
        assert t.dep != 0, t.text
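
# For comparison, a minimal sketch of the same check against a trained
# pipeline (the model name is illustrative and assumes it is installed;
# not part of this test suite):
#
#     import spacy
#     nlp = spacy.load("en_core_web_sm")
#     for t in nlp("i don't have other assistance"):
#         assert t.dep != 0, t.text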


@pytest.mark.xfail
@pytest.mark.parametrize("text", ["Hello"])
def test_parser_parse_one_word_sentence(en_tokenizer, en_parser, text):
    tokens = en_tokenizer(text)
    doc = get_doc(
        tokens.vocab, words=[t.text for t in tokens], heads=[0], deps=["ROOT"]
    )
    assert len(doc) == 1
    # Even with no transitions applied, exiting step_through should
    # finalize the parse and assign a dep to the single token.
    with en_parser.step_through(doc) as _:  # noqa: F841
        pass
    assert doc[0].dep != 0


@pytest.mark.xfail
def test_parser_initial(en_tokenizer, en_parser):
    text = "I ate the pizza with anchovies."
    # heads = [1, 0, 1, -2, -3, -1, -5]
    transition = ["L-nsubj", "S", "L-det"]
    tokens = en_tokenizer(text)
    apply_transition_sequence(en_parser, tokens, transition)
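    # The L- moves attach "I" -> "ate" and "the" -> "pizza"; "ate" and
    # "pizza" themselves stay unattached, and an unattached token's head
    # defaults to the token itself.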
    assert tokens[0].head.i == 1
    assert tokens[1].head.i == 1
    assert tokens[2].head.i == 3
    assert tokens[3].head.i == 3


def test_parser_parse_subtrees(en_tokenizer, en_parser):
    text = "The four wheels on the bus turned quickly"
    heads = [2, 1, 4, -1, 1, -2, 0, -1]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads)
    # doc[2] is "wheels", doc[5] is "bus".
    assert len(list(doc[2].lefts)) == 2
    assert len(list(doc[2].rights)) == 1
    assert len(list(doc[2].children)) == 3
    assert len(list(doc[5].lefts)) == 1
    assert len(list(doc[5].rights)) == 0
    assert len(list(doc[5].children)) == 1
    assert len(list(doc[2].subtree)) == 6


def test_parser_merge_pp(en_tokenizer):
    text = "A phrase with another phrase occurs"
    heads = [1, 4, -1, 1, -2, 0]
    deps = ["det", "nsubj", "prep", "det", "pobj", "ROOT"]
    tags = ["DT", "NN", "IN", "DT", "NN", "VBZ"]
    tokens = en_tokenizer(text)
    doc = get_doc(
        tokens.vocab, words=[t.text for t in tokens], deps=deps, heads=heads, tags=tags
    )
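    # Merge each noun chunk ("A phrase", "another phrase") into one token;
    # the retokenizer buffers the merges and applies them when the context
    # manager exits.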
    with doc.retokenize() as retokenizer:
        for np in doc.noun_chunks:
            retokenizer.merge(np, attrs={"lemma": np.lemma_})
    assert doc[0].text == "A phrase"
    assert doc[1].text == "with"
    assert doc[2].text == "another phrase"
    assert doc[3].text == "occurs"


@pytest.mark.xfail
def test_parser_arc_eager_finalize_state(en_tokenizer, en_parser):
    text = "a b c d e"

    # right branching
    transition = ["R-nsubj", "D", "R-nsubj", "R-nsubj", "D", "R-ROOT"]
    tokens = en_tokenizer(text)
    apply_transition_sequence(en_parser, tokens, transition)
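
    # Expected right-branching tree: "a" heads "b" and "c", and "c" heads
    # "d" and "e", so "a" is the root and its right edge is "e".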
    assert tokens[0].n_lefts == 0
    assert tokens[0].n_rights == 2
    assert tokens[0].left_edge.i == 0
    assert tokens[0].right_edge.i == 4
    assert tokens[0].head.i == 0

    assert tokens[1].n_lefts == 0
    assert tokens[1].n_rights == 0
    assert tokens[1].left_edge.i == 1
    assert tokens[1].right_edge.i == 1
    assert tokens[1].head.i == 0

    assert tokens[2].n_lefts == 0
    assert tokens[2].n_rights == 2
    assert tokens[2].left_edge.i == 2
    assert tokens[2].right_edge.i == 4
    assert tokens[2].head.i == 0

    assert tokens[3].n_lefts == 0
    assert tokens[3].n_rights == 0
    assert tokens[3].left_edge.i == 3
    assert tokens[3].right_edge.i == 3
    assert tokens[3].head.i == 2

    assert tokens[4].n_lefts == 0
    assert tokens[4].n_rights == 0
    assert tokens[4].left_edge.i == 4
    assert tokens[4].right_edge.i == 4
    assert tokens[4].head.i == 2

    # left branching
    transition = ["S", "S", "S", "L-nsubj", "L-nsubj", "L-nsubj", "L-nsubj"]
    tokens = en_tokenizer(text)
    apply_transition_sequence(en_parser, tokens, transition)
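
    # Expected left-branching structure: "a" through "d" all attach as left
    # children of "e", which keeps itself as head.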
    assert tokens[0].n_lefts == 0
    assert tokens[0].n_rights == 0
    assert tokens[0].left_edge.i == 0
    assert tokens[0].right_edge.i == 0
    assert tokens[0].head.i == 4

    assert tokens[1].n_lefts == 0
    assert tokens[1].n_rights == 0
    assert tokens[1].left_edge.i == 1
    assert tokens[1].right_edge.i == 1
    assert tokens[1].head.i == 4

    assert tokens[2].n_lefts == 0
    assert tokens[2].n_rights == 0
    assert tokens[2].left_edge.i == 2
    assert tokens[2].right_edge.i == 2
    assert tokens[2].head.i == 4

    assert tokens[3].n_lefts == 0
    assert tokens[3].n_rights == 0
    assert tokens[3].left_edge.i == 3
    assert tokens[3].right_edge.i == 3
    assert tokens[3].head.i == 4

    assert tokens[4].n_lefts == 4
    assert tokens[4].n_rights == 0
    assert tokens[4].left_edge.i == 0
    assert tokens[4].right_edge.i == 4
    assert tokens[4].head.i == 4