From 9ce059dd067ecc3f097d04023e3cfa0d70d35bb8 Mon Sep 17 00:00:00 2001
From: Vishnu Priya VR
Date: Thu, 14 May 2020 16:28:06 +0530
Subject: [PATCH] Limiting noun_chunks for specific languages (#5396)

* Limiting noun_chunks for specific languages

* Limiting noun_chunks for specific languages Contributor Agreement

* Addressing review comments

* Removed unused fixtures and imports

* Add fa_tokenizer in test suite

* Use fa_tokenizer in test

* Undo extraneous reformatting

Co-authored-by: adrianeboyd
---
 .github/contributors/vishnupriyavr.md   | 106 ++++++++++++++++++++++++
 spacy/lang/de/syntax_iterators.py       |   5 ++
 spacy/lang/el/syntax_iterators.py       |   5 ++
 spacy/lang/en/syntax_iterators.py       |   5 ++
 spacy/lang/es/syntax_iterators.py       |   5 ++
 spacy/lang/fa/syntax_iterators.py       |   5 ++
 spacy/lang/fr/syntax_iterators.py       |   5 ++
 spacy/lang/id/syntax_iterators.py       |   5 ++
 spacy/lang/nb/syntax_iterators.py       |   5 ++
 spacy/lang/sv/syntax_iterators.py       |   5 ++
 spacy/tests/conftest.py                 |   5 ++
 spacy/tests/lang/de/test_noun_chunks.py |  16 ++++
 spacy/tests/lang/el/test_noun_chunks.py |  16 ++++
 spacy/tests/lang/en/test_noun_chunks.py |  15 ++++
 spacy/tests/lang/es/test_noun_chunks.py |  16 ++++
 spacy/tests/lang/fa/test_noun_chunks.py |  17 ++++
 spacy/tests/lang/fr/test_noun_chunks.py |  16 ++++
 spacy/tests/lang/id/test_noun_chunks.py |  16 ++++
 spacy/tests/lang/nb/test_noun_chunks.py |  16 ++++
 spacy/tests/lang/sv/test_noun_chunks.py |  13 +++
 spacy/tokens/doc.pyx                    |   3 +-
 21 files changed, 298 insertions(+), 2 deletions(-)
 create mode 100644 .github/contributors/vishnupriyavr.md
 create mode 100644 spacy/tests/lang/de/test_noun_chunks.py
 create mode 100644 spacy/tests/lang/el/test_noun_chunks.py
 create mode 100644 spacy/tests/lang/es/test_noun_chunks.py
 create mode 100644 spacy/tests/lang/fa/test_noun_chunks.py
 create mode 100644 spacy/tests/lang/fr/test_noun_chunks.py
 create mode 100644 spacy/tests/lang/id/test_noun_chunks.py
 create mode 100644 spacy/tests/lang/nb/test_noun_chunks.py

diff --git a/.github/contributors/vishnupriyavr.md b/.github/contributors/vishnupriyavr.md
new file mode 100644
index 000000000..73657a772
--- /dev/null
+++ b/.github/contributors/vishnupriyavr.md
@@ -0,0 +1,106 @@
+# spaCy contributor agreement
+
+This spaCy Contributor Agreement (**"SCA"**) is based on the
+[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
+The SCA applies to any contribution that you make to any product or project
+managed by us (the **"project"**), and sets out the intellectual property rights
+you grant to us in the contributed materials. The term **"us"** shall mean
+[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term
+**"you"** shall mean the person or entity identified below.
+
+If you agree to be bound by these terms, fill in the information requested
+below and include the filled-in version with your first pull request, under the
+folder [`.github/contributors/`](/.github/contributors/). The name of the file
+should be your GitHub username, with the extension `.md`. For example, the user
+example_user would create the file `.github/contributors/example_user.md`.
+
+Read this agreement carefully before signing. These terms and conditions
+constitute a binding legal agreement.
+
+## Contributor Agreement
+
+1. The term "contribution" or "contributed materials" means any source code,
+object code, patch, tool, sample, graphic, specification, manual,
+documentation, or any other material posted or submitted by you to the project.
+
+2. With respect to any worldwide copyrights, or copyright applications and
+registrations, in your contribution:
+
+    * you hereby assign to us joint ownership, and to the extent that such
+    assignment is or becomes invalid, ineffective or unenforceable, you hereby
+    grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
+    royalty-free, unrestricted license to exercise all rights under those
+    copyrights. This includes, at our option, the right to sublicense these same
+    rights to third parties through multiple levels of sublicensees or other
+    licensing arrangements;
+
+    * you agree that each of us can do all things in relation to your
+    contribution as if each of us were the sole owners, and if one of us makes
+    a derivative work of your contribution, the one who makes the derivative
+    work (or has it made) will be the sole owner of that derivative work;
+
+    * you agree that you will not assert any moral rights in your contribution
+    against us, our licensees or transferees;
+
+    * you agree that we may register a copyright in your contribution and
+    exercise all ownership rights associated with it; and
+
+    * you agree that neither of us has any duty to consult with, obtain the
+    consent of, pay or render an accounting to the other for any use or
+    distribution of your contribution.
+
+3. With respect to any patents you own, or that you can license without payment
+to any third party, you hereby grant to us a perpetual, irrevocable,
+non-exclusive, worldwide, no-charge, royalty-free license to:
+
+    * make, have made, use, sell, offer to sell, import, and otherwise transfer
+    your contribution in whole or in part, alone or in combination with or
+    included in any product, work or materials arising out of the project to
+    which your contribution was submitted, and
+
+    * at our option, to sublicense these same rights to third parties through
+    multiple levels of sublicensees or other licensing arrangements.
+
+4. Except as set out above, you keep all right, title, and interest in your
+contribution. The rights that you grant to us under these terms are effective
+on the date you first submitted a contribution to us, even if your submission
+took place before the date you sign these terms.
+
+5. You covenant, represent, warrant and agree that:
+
+    * Each contribution that you submit is and shall be an original work of
+    authorship and you can legally grant the rights set out in this SCA;
+
+    * to the best of your knowledge, each contribution will not violate any
+    third party's copyrights, trademarks, patents, or other intellectual
+    property rights; and
+
+    * each contribution shall be in compliance with U.S. export control laws and
+    other applicable export and import laws. You agree to notify us if you
+    become aware of any circumstance which would make any of the foregoing
+    representations inaccurate in any respect. We may publicly disclose your
+    participation in the project, including the fact that you have signed the SCA.
+
+6. This SCA is governed by the laws of the State of California and applicable
+U.S. Federal law. Any choice of law rules will not apply.
+
+7. Please place an “x” on one of the applicable statements below. Please do NOT
+mark both statements:
+
+    * [x] I am signing on behalf of myself as an individual and no other person
+    or entity, including my employer, has or will have rights with respect to my
+    contributions.
+
+    * [ ] I am signing on behalf of my employer or a legal entity and I have the
+    actual authority to contractually bind that entity.
+
+## Contributor Details
+
+| Field                          | Entry           |
+| ------------------------------ | --------------- |
+| Name                           | Vishnu Priya VR |
+| Company name (if applicable)   | Uniphore        |
+| Title or role (if applicable)  | NLP/AI Engineer |
+| Date                           | 2020-05-03      |
+| GitHub username                | vishnupriyavr   |
+| Website (optional)             |                 |
diff --git a/spacy/lang/de/syntax_iterators.py b/spacy/lang/de/syntax_iterators.py
index 89d784a0c..13bb857ca 100644
--- a/spacy/lang/de/syntax_iterators.py
+++ b/spacy/lang/de/syntax_iterators.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON
+from ...errors import Errors
 
 
 def noun_chunks(obj):
@@ -28,6 +29,10 @@ def noun_chunks(obj):
         "app",
     ]
     doc = obj.doc  # Ensure works on both Doc and Span.
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     np_label = doc.vocab.strings.add("NP")
     np_deps = set(doc.vocab.strings.add(label) for label in labels)
     close_app = doc.vocab.strings.add("nk")
diff --git a/spacy/lang/el/syntax_iterators.py b/spacy/lang/el/syntax_iterators.py
index 5dfd44f07..f02619ac9 100644
--- a/spacy/lang/el/syntax_iterators.py
+++ b/spacy/lang/el/syntax_iterators.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON
+from ...errors import Errors
 
 
 def noun_chunks(obj):
@@ -14,6 +15,10 @@ def noun_chunks(obj):
     # Further improvement of the models will eliminate the need for this tag.
     labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"]
     doc = obj.doc  # Ensure works on both Doc and Span.
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     np_deps = [doc.vocab.strings.add(label) for label in labels]
     conj = doc.vocab.strings.add("conj")
     nmod = doc.vocab.strings.add("nmod")
diff --git a/spacy/lang/en/syntax_iterators.py b/spacy/lang/en/syntax_iterators.py
index ed665ef29..5ff848124 100644
--- a/spacy/lang/en/syntax_iterators.py
+++ b/spacy/lang/en/syntax_iterators.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON
+from ...errors import Errors
 
 
 def noun_chunks(obj):
@@ -20,6 +21,10 @@ def noun_chunks(obj):
         "ROOT",
     ]
     doc = obj.doc  # Ensure works on both Doc and Span.
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     np_deps = [doc.vocab.strings.add(label) for label in labels]
     conj = doc.vocab.strings.add("conj")
     np_label = doc.vocab.strings.add("NP")
diff --git a/spacy/lang/es/syntax_iterators.py b/spacy/lang/es/syntax_iterators.py
index 6a78d86f7..0badddca1 100644
--- a/spacy/lang/es/syntax_iterators.py
+++ b/spacy/lang/es/syntax_iterators.py
@@ -2,10 +2,15 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON, VERB, AUX
+from ...errors import Errors
 
 
 def noun_chunks(obj):
     doc = obj.doc
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     if not len(doc):
         return
     np_label = doc.vocab.strings.add("NP")
diff --git a/spacy/lang/fa/syntax_iterators.py b/spacy/lang/fa/syntax_iterators.py
index ed665ef29..5ff848124 100644
--- a/spacy/lang/fa/syntax_iterators.py
+++ b/spacy/lang/fa/syntax_iterators.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON
+from ...errors import Errors
 
 
 def noun_chunks(obj):
@@ -20,6 +21,10 @@ def noun_chunks(obj):
         "ROOT",
     ]
     doc = obj.doc  # Ensure works on both Doc and Span.
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     np_deps = [doc.vocab.strings.add(label) for label in labels]
     conj = doc.vocab.strings.add("conj")
     np_label = doc.vocab.strings.add("NP")
diff --git a/spacy/lang/fr/syntax_iterators.py b/spacy/lang/fr/syntax_iterators.py
index 4712d34d9..9495dcf1e 100644
--- a/spacy/lang/fr/syntax_iterators.py
+++ b/spacy/lang/fr/syntax_iterators.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON
+from ...errors import Errors
 
 
 def noun_chunks(obj):
@@ -19,6 +20,10 @@ def noun_chunks(obj):
         "nmod:poss",
     ]
     doc = obj.doc  # Ensure works on both Doc and Span.
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     np_deps = [doc.vocab.strings[label] for label in labels]
     conj = doc.vocab.strings.add("conj")
     np_label = doc.vocab.strings.add("NP")
diff --git a/spacy/lang/id/syntax_iterators.py b/spacy/lang/id/syntax_iterators.py
index 4712d34d9..9495dcf1e 100644
--- a/spacy/lang/id/syntax_iterators.py
+++ b/spacy/lang/id/syntax_iterators.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON
+from ...errors import Errors
 
 
 def noun_chunks(obj):
@@ -19,6 +20,10 @@ def noun_chunks(obj):
         "nmod:poss",
     ]
     doc = obj.doc  # Ensure works on both Doc and Span.
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     np_deps = [doc.vocab.strings[label] for label in labels]
     conj = doc.vocab.strings.add("conj")
     np_label = doc.vocab.strings.add("NP")
diff --git a/spacy/lang/nb/syntax_iterators.py b/spacy/lang/nb/syntax_iterators.py
index 4712d34d9..9495dcf1e 100644
--- a/spacy/lang/nb/syntax_iterators.py
+++ b/spacy/lang/nb/syntax_iterators.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON
+from ...errors import Errors
 
 
 def noun_chunks(obj):
@@ -19,6 +20,10 @@ def noun_chunks(obj):
         "nmod:poss",
     ]
     doc = obj.doc  # Ensure works on both Doc and Span.
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     np_deps = [doc.vocab.strings[label] for label in labels]
     conj = doc.vocab.strings.add("conj")
     np_label = doc.vocab.strings.add("NP")
diff --git a/spacy/lang/sv/syntax_iterators.py b/spacy/lang/sv/syntax_iterators.py
index 7a82e6b59..148884efe 100644
--- a/spacy/lang/sv/syntax_iterators.py
+++ b/spacy/lang/sv/syntax_iterators.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 from ...symbols import NOUN, PROPN, PRON
+from ...errors import Errors
 
 
 def noun_chunks(obj):
@@ -20,6 +21,10 @@ def noun_chunks(obj):
         "nmod:poss",
     ]
     doc = obj.doc  # Ensure works on both Doc and Span.
+
+    if not doc.is_parsed:
+        raise ValueError(Errors.E029)
+
     np_deps = [doc.vocab.strings[label] for label in labels]
     conj = doc.vocab.strings.add("conj")
     np_label = doc.vocab.strings.add("NP")
diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py
index e52c5155f..d26f0ce5c 100644
--- a/spacy/tests/conftest.py
+++ b/spacy/tests/conftest.py
@@ -88,6 +88,11 @@ def eu_tokenizer():
     return get_lang_class("eu").Defaults.create_tokenizer()
 
 
+@pytest.fixture(scope="session")
+def fa_tokenizer():
+    return get_lang_class("fa").Defaults.create_tokenizer()
+
+
 @pytest.fixture(scope="session")
 def fi_tokenizer():
     return get_lang_class("fi").Defaults.create_tokenizer()
diff --git a/spacy/tests/lang/de/test_noun_chunks.py b/spacy/tests/lang/de/test_noun_chunks.py
new file mode 100644
index 000000000..12ece84b5
--- /dev/null
+++ b/spacy/tests/lang/de/test_noun_chunks.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+
+def test_noun_chunks_is_parsed_de(de_tokenizer):
+    """Test that noun_chunks raises a ValueError for 'de' if the Doc is
+    not parsed. To check this, we construct a Doc with a new Vocab here
+    and force is_parsed to False, to make sure the noun chunk iterator
+    doesn't run.
+    """
+    doc = de_tokenizer("Er lag auf seinem")
+    doc.is_parsed = False
+    with pytest.raises(ValueError):
+        list(doc.noun_chunks)
diff --git a/spacy/tests/lang/el/test_noun_chunks.py b/spacy/tests/lang/el/test_noun_chunks.py
new file mode 100644
index 000000000..be14acc81
--- /dev/null
+++ b/spacy/tests/lang/el/test_noun_chunks.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+
+def test_noun_chunks_is_parsed_el(el_tokenizer):
+    """Test that noun_chunks raises a ValueError for 'el' if the Doc is
+    not parsed. To check this, we construct a Doc with a new Vocab here
+    and force is_parsed to False, to make sure the noun chunk iterator
+    doesn't run.
+    """
+    doc = el_tokenizer("είναι χώρα της νοτιοανατολικής")
+    doc.is_parsed = False
+    with pytest.raises(ValueError):
+        list(doc.noun_chunks)
diff --git a/spacy/tests/lang/en/test_noun_chunks.py b/spacy/tests/lang/en/test_noun_chunks.py
index 7dc47f9cc..1109af150 100644
--- a/spacy/tests/lang/en/test_noun_chunks.py
+++ b/spacy/tests/lang/en/test_noun_chunks.py
@@ -6,9 +6,24 @@ from spacy.attrs import HEAD, DEP
 from spacy.symbols import nsubj, dobj, amod, nmod, conj, cc, root
 from spacy.lang.en.syntax_iterators import SYNTAX_ITERATORS
 
+import pytest
+
+
 from ...util import get_doc
 
 
+def test_noun_chunks_is_parsed(en_tokenizer):
+    """Test that noun_chunks raises a ValueError for 'en' if the Doc is
+    not parsed. To check this, we construct a Doc with a new Vocab here
+    and force is_parsed to False, to make sure the noun chunk iterator
+    doesn't run.
+    """
+    doc = en_tokenizer("This is a sentence")
+    doc.is_parsed = False
+    with pytest.raises(ValueError):
+        list(doc.noun_chunks)
+
+
 def test_en_noun_chunks_not_nested(en_vocab):
     words = ["Peter", "has", "chronic", "command", "and", "control", "issues"]
     heads = [1, 0, 4, 3, -1, -2, -5]
diff --git a/spacy/tests/lang/es/test_noun_chunks.py b/spacy/tests/lang/es/test_noun_chunks.py
new file mode 100644
index 000000000..71069d313
--- /dev/null
+++ b/spacy/tests/lang/es/test_noun_chunks.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+
+def test_noun_chunks_is_parsed_es(es_tokenizer):
+    """Test that noun_chunks raises a ValueError for 'es' if the Doc is
+    not parsed. To check this, we construct a Doc with a new Vocab here
+    and force is_parsed to False, to make sure the noun chunk iterator
+    doesn't run.
+    """
+    doc = es_tokenizer("en Oxford este verano")
+    doc.is_parsed = False
+    with pytest.raises(ValueError):
+        list(doc.noun_chunks)
diff --git a/spacy/tests/lang/fa/test_noun_chunks.py b/spacy/tests/lang/fa/test_noun_chunks.py
new file mode 100644
index 000000000..a98aae061
--- /dev/null
+++ b/spacy/tests/lang/fa/test_noun_chunks.py
@@ -0,0 +1,17 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+
+def test_noun_chunks_is_parsed_fa(fa_tokenizer):
+    """Test that noun_chunks raises a ValueError for 'fa' if the Doc is
+    not parsed. To check this, we construct a Doc with a new Vocab here
+    and force is_parsed to False, to make sure the noun chunk iterator
+    doesn't run.
+    """
+
+    doc = fa_tokenizer("این یک جمله نمونه می باشد.")
+    doc.is_parsed = False
+    with pytest.raises(ValueError):
+        list(doc.noun_chunks)
diff --git a/spacy/tests/lang/fr/test_noun_chunks.py b/spacy/tests/lang/fr/test_noun_chunks.py
new file mode 100644
index 000000000..876bc0ea4
--- /dev/null
+++ b/spacy/tests/lang/fr/test_noun_chunks.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+
+def test_noun_chunks_is_parsed_fr(fr_tokenizer):
+    """Test that noun_chunks raises a ValueError for 'fr' if the Doc is
+    not parsed. To check this, we construct a Doc with a new Vocab here
+    and force is_parsed to False, to make sure the noun chunk iterator
+    doesn't run.
+    """
+    doc = fr_tokenizer("trouver des travaux antérieurs")
+    doc.is_parsed = False
+    with pytest.raises(ValueError):
+        list(doc.noun_chunks)
diff --git a/spacy/tests/lang/id/test_noun_chunks.py b/spacy/tests/lang/id/test_noun_chunks.py
new file mode 100644
index 000000000..7bac808b3
--- /dev/null
+++ b/spacy/tests/lang/id/test_noun_chunks.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+
+def test_noun_chunks_is_parsed_id(id_tokenizer):
+    """Test that noun_chunks raises a ValueError for 'id' if the Doc is
+    not parsed. To check this, we construct a Doc with a new Vocab here
+    and force is_parsed to False, to make sure the noun chunk iterator
+    doesn't run.
+    """
+    doc = id_tokenizer("sebelas")
+    doc.is_parsed = False
+    with pytest.raises(ValueError):
+        list(doc.noun_chunks)
diff --git a/spacy/tests/lang/nb/test_noun_chunks.py b/spacy/tests/lang/nb/test_noun_chunks.py
new file mode 100644
index 000000000..17ec6cfda
--- /dev/null
+++ b/spacy/tests/lang/nb/test_noun_chunks.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+
+def test_noun_chunks_is_parsed_nb(nb_tokenizer):
+    """Test that noun_chunks raises a ValueError for 'nb' if the Doc is
+    not parsed. To check this, we construct a Doc with a new Vocab here
+    and force is_parsed to False, to make sure the noun chunk iterator
+    doesn't run.
+    """
+    doc = nb_tokenizer("Smørsausen brukes bl.a. til")
til") + doc.is_parsed = False + with pytest.raises(ValueError): + list(doc.noun_chunks) diff --git a/spacy/tests/lang/sv/test_noun_chunks.py b/spacy/tests/lang/sv/test_noun_chunks.py index ac7c066ba..38086c255 100644 --- a/spacy/tests/lang/sv/test_noun_chunks.py +++ b/spacy/tests/lang/sv/test_noun_chunks.py @@ -2,9 +2,22 @@ from __future__ import unicode_literals import pytest + from ...util import get_doc +def test_noun_chunks_is_parsed_sv(sv_tokenizer): + """Test that noun_chunks raises Value Error for 'sv' language if Doc is not parsed. + To check this test, we're constructing a Doc + with a new Vocab here and forcing is_parsed to 'False' + to make sure the noun chunks don't run. + """ + doc = sv_tokenizer("Studenten läste den bästa boken") + doc.is_parsed = False + with pytest.raises(ValueError): + list(doc.noun_chunks) + + SV_NP_TEST_EXAMPLES = [ ( "En student läste en bok", # A student read a book diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 4dc438695..25a147208 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -597,8 +597,7 @@ cdef class Doc: DOCS: https://spacy.io/api/doc#noun_chunks """ - if not self.is_parsed: - raise ValueError(Errors.E029) + # Accumulate the result before beginning to iterate over it. This # prevents the tokenisation from being changed out from under us # during the iteration. The tricky thing here is that Span accepts