From 6c7301cc6d204f6f23e78e30eca987655242a1a8 Mon Sep 17 00:00:00 2001
From: Wolfgang Seeker
Date: Thu, 21 Apr 2016 16:50:53 +0200
Subject: [PATCH 1/5] the parser now introduces sentence boundaries properly
 when predicting dependents with root labels

---
 spacy/syntax/arc_eager.pyx     |  1 +
 spacy/tests/parser/test_sbd.py | 74 ++++++++++++++++++++++++++++++++--
 2 files changed, 72 insertions(+), 3 deletions(-)

diff --git a/spacy/syntax/arc_eager.pyx b/spacy/syntax/arc_eager.pyx
index 1b76f724c..5cc04c96c 100644
--- a/spacy/syntax/arc_eager.pyx
+++ b/spacy/syntax/arc_eager.pyx
@@ -447,6 +447,7 @@ cdef class ArcEager(TransitionSystem):
             # note that this can create non-projective trees if there are arcs
             # between nodes on both sides of the new root node
             st._sent[i].head = 0
+            st._sent[st._sent[i].l_edge].sent_start = True
 
     cdef int set_valid(self, int* output, const StateC* st) nogil:
         cdef bint[N_MOVES] is_valid
diff --git a/spacy/tests/parser/test_sbd.py b/spacy/tests/parser/test_sbd.py
index 771e2401f..247c14a34 100644
--- a/spacy/tests/parser/test_sbd.py
+++ b/spacy/tests/parser/test_sbd.py
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 
 import pytest
-
+from spacy.tokens import Doc
 
 
 @pytest.mark.models
@@ -42,7 +42,7 @@ def test_single_question(EN):
 
 @pytest.mark.models
 def test_sentence_breaks_no_space(EN):
-    doc = EN.tokenizer.tokens_from_list('This is a sentence . This is another one .'.split(' '))
+    doc = EN.tokenizer.tokens_from_list(u'This is a sentence . This is another one .'.split(' '))
     EN.tagger(doc)
     with EN.parser.step_through(doc) as stepwise:
         # stack empty, automatic Shift (This)
@@ -83,7 +83,7 @@ def test_sentence_breaks_no_space(EN):
 
 @pytest.mark.models
 def test_sentence_breaks_with_space(EN):
-    doc = EN.tokenizer.tokens_from_list('\t This is \n a sentence \n \n . \n \t \n This is another \t one .'.split(' '))
+    doc = EN.tokenizer.tokens_from_list(u'\t This is \n a sentence \n \n . \n \t \n This is another \t one .'.split(' '))
    EN.tagger(doc)
     with EN.parser.step_through(doc) as stepwise:
         # stack empty, automatic Shift (This)
@@ -120,3 +120,71 @@ def test_sentence_breaks_with_space(EN):
     for tok in doc:
         assert tok.dep != 0 or tok.is_space
     assert [ tok.head.i for tok in doc ] == [1,2,2,2,5,2,5,5,2,8,8,8,13,13,16,14,13,13]
+
+
+
+@pytest.fixture
+@pytest.mark.models
+def example(EN):
+    def apply_transition_sequence(model, doc, sequence):
+        with model.parser.step_through(doc) as stepwise:
+            for transition in sequence:
+                stepwise.transition(transition)
+    doc = EN.tokenizer.tokens_from_list(u"I bought a couch from IKEA. It was n't very comfortable .".split(' '))
+    EN.tagger(doc)
+    apply_transition_sequence(EN, doc, ['L-nsubj','S','L-det','R-dobj','D','R-prep','R-pobj','D','D','S','L-nsubj','R-ROOT','R-neg','D','S','L-advmod','R-acomp','D','R-punct'])
+    return doc
+
+
+def test_sbd_for_root_label_dependents(example):
+    """
+    make sure that the parser properly introduces a sentence boundary without
+    the break transition by checking for dependents with the root label
+    """
+
+    assert example[1].head.i == 1
+    assert example[7].head.i == 7
+
+    sents = list(example.sents)
+    assert len(sents) == 2
+    assert sents[1][0].orth_ == u'It'
+
+
+
+@pytest.mark.models
+def test_sbd_serialization(EN, example):
+    """
+    test that before and after serialization, the sentence boundaries are the same even
+    if the parser predicted two roots for the sentence that were made into two sentences
+    after parsing by arc_eager.finalize()
+
+    This is actually an interaction between the sentence boundary prediction and doc.from_array.
+    The process is the following: if the parser doesn't predict a sentence boundary but attaches
+    a word with the ROOT label, the second root node is made root of its own sentence after parsing.
+    During serialization, sentence boundary information is lost and reintroduced when the data
+    is deserialized by introducing sentence starts at every left-edge of every root node.
+
+    BUG that is tested here: So far, the parser wasn't introducing a sentence start when
+    it introduced the second root node.
+    """
+
+    example_serialized = Doc(EN.vocab).from_bytes(example.to_bytes())
+
+    assert example.to_bytes() == example_serialized.to_bytes()
+    assert [s.text for s in example.sents] == [s.text for s in example_serialized.sents]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+

From 736ffcb9a2cee95ddda1eb690e6884e17e710b3e Mon Sep 17 00:00:00 2001
From: Wolfgang Seeker
Date: Thu, 21 Apr 2016 16:55:55 +0200
Subject: [PATCH 2/5] remove whitespace

---
 spacy/tests/parser/test_sbd.py | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/spacy/tests/parser/test_sbd.py b/spacy/tests/parser/test_sbd.py
index 247c14a34..37f9f4061 100644
--- a/spacy/tests/parser/test_sbd.py
+++ b/spacy/tests/parser/test_sbd.py
@@ -172,19 +172,3 @@ def test_sbd_serialization(EN, example):
 
     assert example.to_bytes() == example_serialized.to_bytes()
     assert [s.text for s in example.sents] == [s.text for s in example_serialized.sents]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

From b6477fc4f4cef6d121305e6c6166e4a5301a03f9 Mon Sep 17 00:00:00 2001
From: Wolfgang Seeker
Date: Thu, 21 Apr 2016 17:15:10 +0200
Subject: [PATCH 3/5] adjusted tests to Travis setup

---
 spacy/tests/parser/test_sbd.py | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/spacy/tests/parser/test_sbd.py b/spacy/tests/parser/test_sbd.py
index 37f9f4061..86bbe88d0 100644
--- a/spacy/tests/parser/test_sbd.py
+++ b/spacy/tests/parser/test_sbd.py
@@ -123,24 +123,21 @@ def test_sentence_breaks_with_space(EN):
 
 
 
-@pytest.fixture
+def apply_transition_sequence(model, doc, sequence):
+    with model.parser.step_through(doc) as stepwise:
+        for transition in sequence:
+            stepwise.transition(transition)
+
+
 @pytest.mark.models
-def example(EN):
-    def apply_transition_sequence(model, doc, sequence):
-        with model.parser.step_through(doc) as stepwise:
-            for transition in sequence:
-                stepwise.transition(transition)
-    doc = EN.tokenizer.tokens_from_list(u"I bought a couch from IKEA. It was n't very comfortable .".split(' '))
-    EN.tagger(doc)
-    apply_transition_sequence(EN, doc, ['L-nsubj','S','L-det','R-dobj','D','R-prep','R-pobj','D','D','S','L-nsubj','R-ROOT','R-neg','D','S','L-advmod','R-acomp','D','R-punct'])
-    return doc
-
-
-def test_sbd_for_root_label_dependents(example):
+def test_sbd_for_root_label_dependents(EN):
     """
     make sure that the parser properly introduces a sentence boundary without
     the break transition by checking for dependents with the root label
     """
+    example = EN.tokenizer.tokens_from_list(u"I bought a couch from IKEA. It was n't very comfortable .".split(' '))
+    EN.tagger(example)
+    apply_transition_sequence(EN, example, ['L-nsubj','S','L-det','R-dobj','D','R-prep','R-pobj','D','D','S','L-nsubj','R-ROOT','R-neg','D','S','L-advmod','R-acomp','D','R-punct'])
 
     assert example[1].head.i == 1
     assert example[7].head.i == 7
@@ -152,7 +149,7 @@ def test_sbd_for_root_label_dependents(example):
 
 
 @pytest.mark.models
-def test_sbd_serialization(EN, example):
+def test_sbd_serialization(EN):
     """
     test that before and after serialization, the sentence boundaries are the same even
     if the parser predicted two roots for the sentence that were made into two sentences
@@ -168,6 +165,10 @@ def test_sbd_serialization(EN, example):
     it introduced the second root node.
     """
 
+    example = EN.tokenizer.tokens_from_list(u"I bought a couch from IKEA. It was n't very comfortable .".split(' '))
+    EN.tagger(example)
+    apply_transition_sequence(EN, example, ['L-nsubj','S','L-det','R-dobj','D','R-prep','R-pobj','D','D','S','L-nsubj','R-ROOT','R-neg','D','S','L-advmod','R-acomp','D','R-punct'])
+
     example_serialized = Doc(EN.vocab).from_bytes(example.to_bytes())
 
     assert example.to_bytes() == example_serialized.to_bytes()
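The hunks in PATCH 4/5 below are easiest to read with one convention in mind: in this parser, TokenC.head stores a relative offset rather than an absolute index, so the head of token i lives at i + st._sent[i].head, and an offset of 0 marks a root. The pre-patch code read the offset as if it were an index (orig_head_id = st._sent[i].head) and followed head pointers without ever checking for a root. What follows is a minimal Python sketch of the corrected left-edge walk; it is illustrative only (the mutable `sent` records and the helper names are assumptions for exposition, not spaCy API):

    def absolute_head(sent, i):
        # heads are stored as relative offsets; an offset of 0 means the token is a root
        return i + sent[i].head

    def update_left_edges(sent, orig_head_id, new_edge):
        # rewrite l_edge on every ancestor that shared the old left edge;
        # projectivity guarantees these are exactly the nodes that must change
        old_edge = sent[orig_head_id].l_edge
        head_i = absolute_head(sent, orig_head_id)
        while sent[head_i].l_edge == old_edge:
            sent[head_i].l_edge = new_edge
            if sent[head_i].head == 0:
                break  # a root points at itself; without this check the walk would loop forever
            head_i = absolute_head(sent, head_i)
        sent[orig_head_id].l_edge = new_edge

The mirror-image walk updates r_edge for right dependents; PATCH 4 also fixes a copy-paste error in that branch's comment (l_edges -> r_edges).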
From f57f843e85ee3a93fd69849a0880a07c4be22801 Mon Sep 17 00:00:00 2001
From: Wolfgang Seeker
Date: Mon, 25 Apr 2016 12:01:19 +0200
Subject: [PATCH 4/5] fix bug in updating tree structure when introducing
 additional roots

---
 spacy/syntax/arc_eager.pyx       | 39 +++++++++--------
 spacy/tests/parser/test_parse.py | 79 ++++++++++++++++++++++++++++++++
 spacy/tests/parser/test_sbd.py   |  7 +++----
 3 files changed, 106 insertions(+), 19 deletions(-)

diff --git a/spacy/syntax/arc_eager.pyx b/spacy/syntax/arc_eager.pyx
index 5cc04c96c..e86b99b13 100644
--- a/spacy/syntax/arc_eager.pyx
+++ b/spacy/syntax/arc_eager.pyx
@@ -399,31 +399,34 @@ cdef class ArcEager(TransitionSystem):
         cdef TokenC* orig_head
         cdef int new_edge
         cdef int child_i
-        cdef TokenC* head_i
+        cdef int head_i
         for i in range(st.length):
             if st._sent[i].head == 0 and st._sent[i].dep == 0:
                 st._sent[i].dep = self.root_label
             # If we're not using the Break transition, we segment via root-labelled
            # arcs between the root words.
            elif USE_ROOT_ARC_SEGMENT and st._sent[i].dep == self.root_label:
-                orig_head_id = st._sent[i].head
+                orig_head_id = i + st._sent[i].head
                 orig_head = &st._sent[orig_head_id]
                 if i < orig_head_id: # i is left dependent
                     orig_head.l_kids -= 1
                     if i == orig_head.l_edge: # i is left-most child
                         # find the second left-most child and make it the new l_edge
                         new_edge = orig_head_id
-                        child_i = i
+                        child_i = i+1
                         while child_i < orig_head_id:
-                            if st._sent[child_i].head == orig_head_id:
+                            if child_i + st._sent[child_i].head == orig_head_id:
                                 new_edge = child_i
+                                break
                             child_i += 1
                         # then walk up the path to root and update the l_edges of all ancestors
                         # the logic here works because the tree is guaranteed to be projective
-                        head_i = &st._sent[orig_head.head]
-                        while head_i.l_edge == orig_head.l_edge:
-                            head_i.l_edge = new_edge
-                            head_i = &st._sent[head_i.head]
+                        head_i = orig_head_id + orig_head.head
+                        while st._sent[head_i].l_edge == orig_head.l_edge:
+                            st._sent[head_i].l_edge = new_edge
+                            if st._sent[head_i].head == 0:
+                                break
+                            head_i += st._sent[head_i].head
                         orig_head.l_edge = new_edge
 
                 elif i > orig_head_id: # i is right dependent
@@ -431,22 +434,26 @@ cdef class ArcEager(TransitionSystem):
                     if i == orig_head.r_edge:
                         # find the second right-most child and make it the new r_edge
                         new_edge = orig_head_id
-                        child_i = i
+                        child_i = i-1
                         while child_i > orig_head_id:
-                            if st._sent[child_i].head == orig_head_id:
+                            if child_i + st._sent[child_i].head == orig_head_id:
                                 new_edge = child_i
+                                break
                             child_i -= 1
-                        # then walk up the path to root and update the l_edges of all ancestors
+                        # then walk up the path to root and update the r_edges of all ancestors
                         # the logic here works because the tree is guaranteed to be projective
-                        head_i = &st._sent[orig_head.head]
-                        while head_i.r_edge == orig_head.r_edge:
-                            head_i.r_edge = new_edge
-                            head_i = &st._sent[head_i.head]
+                        head_i = orig_head_id + orig_head.head
+                        while st._sent[head_i].r_edge == orig_head.r_edge:
+                            st._sent[head_i].r_edge = new_edge
+                            if st._sent[head_i].head == 0:
+                                break
+                            head_i += st._sent[head_i].head
                         orig_head.r_edge = new_edge
-            # note that this can create non-projective trees if there are arcs
+            # note that this may create non-projective trees if there are arcs
             # between nodes on both sides of the new root node
             st._sent[i].head = 0
             st._sent[st._sent[i].l_edge].sent_start = True
+
 
     cdef int set_valid(self, int* output, const StateC* st) nogil:
diff --git a/spacy/tests/parser/test_parse.py b/spacy/tests/parser/test_parse.py
index d4b633d0d..7972612f5 100644
--- a/spacy/tests/parser/test_parse.py
+++ b/spacy/tests/parser/test_parse.py
@@ -19,3 +19,82 @@ def test_one_word_sentence(EN):
     with EN.parser.step_through(doc) as _:
         pass
     assert doc[0].dep != 0
+
+
+def apply_transition_sequence(model, doc, sequence):
+    with model.parser.step_through(doc) as stepwise:
+        for transition in sequence:
+            stepwise.transition(transition)
+
+
+@pytest.mark.models
+def test_arc_eager_finalize_state(EN):
+    # right branching
+    example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
+    apply_transition_sequence(EN, example, ['R-nsubj','D','R-nsubj','R-nsubj','D','R-ROOT'])
+    print [ '%s/%s' % (t.dep_,t.head.i) for t in example ]
+
+    assert example[0].n_lefts == 0
+    assert example[0].n_rights == 2
+    assert example[0].left_edge.i == 0
+    assert example[0].right_edge.i == 3
+    assert example[0].head.i == 0
+
+    assert example[1].n_lefts == 0
+    assert example[1].n_rights == 0
+    assert example[1].left_edge.i == 1
+    assert example[1].right_edge.i == 1
+    assert example[1].head.i == 0
+
+    assert example[2].n_lefts == 0
+    assert example[2].n_rights == 1
+    assert example[2].left_edge.i == 2
+    assert example[2].right_edge.i == 3
+    assert example[2].head.i == 0
+
+    assert example[3].n_lefts == 0
+    assert example[3].n_rights == 0
+    assert example[3].left_edge.i == 3
+    assert example[3].right_edge.i == 3
+    assert example[3].head.i == 2
+
+    assert example[4].n_lefts == 0
+    assert example[4].n_rights == 0
+    assert example[4].left_edge.i == 4
+    assert example[4].right_edge.i == 4
+    assert example[4].head.i == 4
+
+    # left branching
+    example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
+    apply_transition_sequence(EN, example, ['S','L-nsubj','L-ROOT','S','L-nsubj','L-nsubj'])
+    print [ '%s/%s' % (t.dep_,t.head.i) for t in example ]
+
+    assert example[0].n_lefts == 0
+    assert example[0].n_rights == 0
+    assert example[0].left_edge.i == 0
+    assert example[0].right_edge.i == 0
+    assert example[0].head.i == 0
+
+    assert example[1].n_lefts == 0
+    assert example[1].n_rights == 0
+    assert example[1].left_edge.i == 1
+    assert example[1].right_edge.i == 1
+    assert example[1].head.i == 2
+
+    assert example[2].n_lefts == 1
+    assert example[2].n_rights == 0
+    assert example[2].left_edge.i == 1
+    assert example[2].right_edge.i == 2
+    assert example[2].head.i == 4
+
+    assert example[3].n_lefts == 0
+    assert example[3].n_rights == 0
+    assert example[3].left_edge.i == 3
+    assert example[3].right_edge.i == 3
+    assert example[3].head.i == 4
+
+    assert example[4].n_lefts == 2
+    assert example[4].n_rights == 0
+    assert example[4].left_edge.i == 1
+    assert example[4].right_edge.i == 4
+    assert example[4].head.i == 4
diff --git a/spacy/tests/parser/test_sbd.py b/spacy/tests/parser/test_sbd.py
index 86bbe88d0..0cc91a3a1 100644
--- a/spacy/tests/parser/test_sbd.py
+++ b/spacy/tests/parser/test_sbd.py
@@ -135,12 +135,13 @@ def test_sbd_for_root_label_dependents(EN):
     make sure that the parser properly introduces a sentence boundary without
     the break transition by checking for dependents with the root label
     """
-    example = EN.tokenizer.tokens_from_list(u"I bought a couch from IKEA. It was n't very comfortable .".split(' '))
+    example = EN.tokenizer.tokens_from_list(u"I saw a firefly It glowed".split(' '))
     EN.tagger(example)
-    apply_transition_sequence(EN, example, ['L-nsubj','S','L-det','R-dobj','D','R-prep','R-pobj','D','D','S','L-nsubj','R-ROOT','R-neg','D','S','L-advmod','R-acomp','D','R-punct'])
+    apply_transition_sequence(EN, example, ['L-nsubj','S','L-det','R-dobj','D','S','L-nsubj','R-ROOT'])
+    print ['%s/%s' % (t.dep_,t.head.i) for t in example]
 
     assert example[1].head.i == 1
-    assert example[7].head.i == 7
+    assert example[5].head.i == 5
 
     sents = list(example.sents)
     assert len(sents) == 2

From 1003e7ccec483ed7d9ad3995224af89350a7bbae Mon Sep 17 00:00:00 2001
From: Wolfgang Seeker
Date: Mon, 25 Apr 2016 12:12:40 +0200
Subject: [PATCH 5/5] remove debug output from tests

---
 spacy/tests/parser/test_parse.py | 2 --
 spacy/tests/parser/test_sbd.py   | 1 -
 2 files changed, 3 deletions(-)

diff --git a/spacy/tests/parser/test_parse.py b/spacy/tests/parser/test_parse.py
index 7972612f5..2590ad13d 100644
--- a/spacy/tests/parser/test_parse.py
+++ b/spacy/tests/parser/test_parse.py
@@ -32,7 +32,6 @@ def test_arc_eager_finalize_state(EN):
     # right branching
     example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
     apply_transition_sequence(EN, example, ['R-nsubj','D','R-nsubj','R-nsubj','D','R-ROOT'])
-    print [ '%s/%s' % (t.dep_,t.head.i) for t in example ]
 
     assert example[0].n_lefts == 0
     assert example[0].n_rights == 2
@@ -67,7 +66,6 @@ def test_arc_eager_finalize_state(EN):
     # left branching
     example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
     apply_transition_sequence(EN, example, ['S','L-nsubj','L-ROOT','S','L-nsubj','L-nsubj'])
-    print [ '%s/%s' % (t.dep_,t.head.i) for t in example ]
 
     assert example[0].n_lefts == 0
     assert example[0].n_rights == 0
diff --git a/spacy/tests/parser/test_sbd.py b/spacy/tests/parser/test_sbd.py
index 0cc91a3a1..d72cef32d 100644
--- a/spacy/tests/parser/test_sbd.py
+++ b/spacy/tests/parser/test_sbd.py
@@ -138,7 +138,6 @@ def test_sbd_for_root_label_dependents(EN):
     example = EN.tokenizer.tokens_from_list(u"I saw a firefly It glowed".split(' '))
     EN.tagger(example)
     apply_transition_sequence(EN, example, ['L-nsubj','S','L-det','R-dobj','D','S','L-nsubj','R-ROOT'])
-    print ['%s/%s' % (t.dep_,t.head.i) for t in example]
 
     assert example[1].head.i == 1
     assert example[5].head.i == 5
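Taken together: PATCH 1 marks the left edge of every extra root with sent_start, and PATCH 4 makes sure those left edges are actually correct when a second root is introduced. Since deserialization recomputes sentence starts from the left edge of each root, the boundaries now survive a round trip. Below is a minimal end-to-end check in the style of the tests above; it is a sketch assuming `EN` is a loaded 2016-era English pipeline (the spaCy 0.x API used throughout this series, with the parser assumed callable on a doc):

    from spacy.tokens import Doc

    doc = EN.tokenizer.tokens_from_list(u"I saw a firefly It glowed".split(' '))
    EN.tagger(doc)
    EN.parser(doc)  # may attach 'glowed' via a root label instead of predicting Break

    # from_bytes() reintroduces sentence starts at every root's left edge,
    # so the boundaries must match the ones present before serialization
    roundtripped = Doc(EN.vocab).from_bytes(doc.to_bytes())
    assert [s.text for s in doc.sents] == [s.text for s in roundtripped.sents]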