diff --git a/spacy/tests/doc/test_noun_chunks.py b/spacy/tests/lang/en/test_noun_chunks.py
similarity index 73%
rename from spacy/tests/doc/test_noun_chunks.py
rename to spacy/tests/lang/en/test_noun_chunks.py
index f046dfa20..2bfe041f9 100644
--- a/spacy/tests/doc/test_noun_chunks.py
+++ b/spacy/tests/lang/en/test_noun_chunks.py
@@ -1,15 +1,15 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-from ...attrs import HEAD, DEP
-from ...symbols import nsubj, dobj, amod, nmod, conj, cc, root
-from ...syntax.iterators import english_noun_chunks
-from ..util import get_doc
+from ....attrs import HEAD, DEP
+from ....symbols import nsubj, dobj, amod, nmod, conj, cc, root
+from ....lang.en.syntax_iterators import SYNTAX_ITERATORS
+from ...util import get_doc
 
 import numpy
 
 
-def test_doc_noun_chunks_not_nested(en_tokenizer):
+def test_en_noun_chunks_not_nested(en_tokenizer):
     text = "Peter has chronic command and control issues"
     heads = [1, 0, 4, 3, -1, -2, -5]
     deps = ['nsubj', 'ROOT', 'amod', 'nmod', 'cc', 'conj', 'dobj']
@@ -21,7 +21,7 @@ def test_doc_noun_chunks_not_nested(en_tokenizer):
                 [HEAD, DEP],
                 numpy.asarray([[1, nsubj], [0, root], [4, amod], [3, nmod],
                                [-1, cc], [-2, conj], [-5, dobj]], dtype='uint64'))
-    tokens.noun_chunks_iterator = english_noun_chunks
+    tokens.noun_chunks_iterator = SYNTAX_ITERATORS['noun_chunks']
     word_occurred = {}
     for chunk in tokens.noun_chunks:
         for word in chunk:
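
For context on what the renamed test now exercises: below is a minimal sketch of the same keyed lookup used outside the test suite. The `SYNTAX_ITERATORS['noun_chunks']` lookup and the `noun_chunks_iterator` assignment are taken directly from the diff above; the model name `en_core_web_sm` is an assumption for illustration and is not part of this PR.

```python
# Minimal sketch, assuming a spaCy install matching this branch's layout
# and an English model with a dependency parser (the model name below is
# an assumption for illustration, not taken from this PR).
import spacy
from spacy.lang.en.syntax_iterators import SYNTAX_ITERATORS

nlp = spacy.load('en_core_web_sm')
doc = nlp("Peter has chronic command and control issues")

# Same pattern as the test: point the doc's chunker at the English
# iterator via the per-language SYNTAX_ITERATORS dict, then iterate.
doc.noun_chunks_iterator = SYNTAX_ITERATORS['noun_chunks']
for chunk in doc.noun_chunks:
    # Per the test's assertion, each word appears in exactly one chunk,
    # i.e. the yielded noun chunks never nest or overlap.
    print(chunk.text)
```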