From 73b577cb01b7f5460b55f83a1c4b1678e086dd68 Mon Sep 17 00:00:00 2001
From: ines
Date: Mon, 8 May 2017 22:29:04 +0200
Subject: [PATCH] Fix relative imports

---
 spacy/lang/bn/__init__.py             |  8 ++++----
 spacy/lang/bn/morph_rules.py          |  4 ++--
 spacy/lang/bn/punctuation.py          |  5 ++---
 spacy/lang/bn/tag_map.py              |  4 ++--
 spacy/lang/bn/tokenizer_exceptions.py |  2 +-
 spacy/lang/de/__init__.py             | 10 +++++-----
 spacy/lang/de/tag_map.py              |  4 ++--
 spacy/lang/de/tokenizer_exceptions.py |  4 ++--
 spacy/lang/en/__init__.py             |  8 ++++----
 spacy/lang/en/morph_rules.py          |  4 ++--
 spacy/lang/en/tag_map.py              |  4 ++--
 spacy/lang/en/tokenizer_exceptions.py |  4 ++--
 spacy/lang/es/__init__.py             | 10 +++++-----
 spacy/lang/es/tokenizer_exceptions.py |  4 ++--
 spacy/lang/fi/__init__.py             |  8 ++++----
 spacy/lang/fi/tokenizer_exceptions.py |  2 +-
 spacy/lang/fr/__init__.py             | 10 +++++-----
 spacy/lang/fr/punctuation.py          |  6 +++---
 spacy/lang/fr/tokenizer_exceptions.py | 10 +++++-----
 spacy/lang/he/__init__.py             |  8 ++++----
 spacy/lang/hu/__init__.py             | 10 +++++-----
 spacy/lang/hu/punctuation.py          |  6 +++---
 spacy/lang/hu/tokenizer_exceptions.py |  7 ++++---
 spacy/lang/it/__init__.py             | 10 +++++-----
 spacy/lang/ja/__init__.py             |  6 +++---
 spacy/lang/nb/__init__.py             |  8 ++++----
 spacy/lang/nb/morph_rules.py          |  4 ++--
 spacy/lang/nb/tokenizer_exceptions.py |  2 +-
 spacy/lang/nl/__init__.py             |  8 ++++----
 spacy/lang/pt/__init__.py             | 10 +++++-----
 spacy/lang/pt/tokenizer_exceptions.py |  4 ++--
 spacy/lang/sv/__init__.py             | 10 +++++-----
 spacy/lang/sv/morph_rules.py          |  4 ++--
 spacy/lang/sv/tokenizer_exceptions.py |  4 ++--
 spacy/lang/zh/__init__.py             |  4 ++--
 35 files changed, 108 insertions(+), 108 deletions(-)

diff --git a/spacy/lang/bn/__init__.py b/spacy/lang/bn/__init__.py
index d47b32857..cb748085b 100644
--- a/spacy/lang/bn/__init__.py
+++ b/spacy/lang/bn/__init__.py
@@ -7,10 +7,10 @@ from .tag_map import TAG_MAP
 from .stop_words import STOP_WORDS
 from .lemmatizer import LEMMA_RULES

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc


 class Bengali(Language):
diff --git a/spacy/lang/bn/morph_rules.py b/spacy/lang/bn/morph_rules.py
index dda948e47..8561f8676 100644
--- a/spacy/lang/bn/morph_rules.py
+++ b/spacy/lang/bn/morph_rules.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import LEMMA
-from ..deprecated import PRON_LEMMA
+from ...deprecated import PRON_LEMMA
+from ...symbols import LEMMA


 MORPH_RULES = {
diff --git a/spacy/lang/bn/punctuation.py b/spacy/lang/bn/punctuation.py
index cd5ac7f1d..8b37d6cb3 100644
--- a/spacy/lang/bn/punctuation.py
+++ b/spacy/lang/bn/punctuation.py
@@ -1,9 +1,6 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..language_data.punctuation import ALPHA_LOWER, LIST_ELLIPSES, QUOTES
-from ..language_data.punctuation import ALPHA_UPPER, LIST_QUOTES, UNITS
-from ..language_data.punctuation import CURRENCY, LIST_PUNCT, ALPHA, _QUOTES


 CURRENCY_SYMBOLS = r"\$ ¢ £ € ¥ ฿ ৳"
@@ -44,3 +41,5 @@ TOKENIZER_INFIXES = (
     r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=_QUOTES.replace("'", "").strip().replace(" ", "")),
 ]
 )
+from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, UNITS
+from ..char_classes import ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS, QUOTES
diff --git a/spacy/lang/bn/tag_map.py b/spacy/lang/bn/tag_map.py
index a264e8d95..a1d95b81d 100644
--- a/spacy/lang/bn/tag_map.py
+++ b/spacy/lang/bn/tag_map.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
-from ..symbols import CCONJ, NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX, SYM
+from ...symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
+from ...symbols import CCONJ, NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX, SYM


 TAG_MAP = {
diff --git a/spacy/lang/bn/tokenizer_exceptions.py b/spacy/lang/bn/tokenizer_exceptions.py
index a47b89280..c5a02d5ad 100644
--- a/spacy/lang/bn/tokenizer_exceptions.py
+++ b/spacy/lang/bn/tokenizer_exceptions.py
@@ -1,7 +1,7 @@
 # coding=utf-8
 from __future__ import unicode_literals

-from ..symbols import *
+from ...symbols import ORTH, LEMMA


 TOKENIZER_EXCEPTIONS = {}
diff --git a/spacy/lang/de/__init__.py b/spacy/lang/de/__init__.py
index 65f7a3c66..2da572500 100644
--- a/spacy/lang/de/__init__.py
+++ b/spacy/lang/de/__init__.py
@@ -6,11 +6,11 @@ from .tag_map import TAG_MAP
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc


 class German(Language):
diff --git a/spacy/lang/de/tag_map.py b/spacy/lang/de/tag_map.py
index 75ae45edd..d16bd17e0 100644
--- a/spacy/lang/de/tag_map.py
+++ b/spacy/lang/de/tag_map.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
-from ..symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX
+from ...symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
+from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX


 TAG_MAP = {
diff --git a/spacy/lang/de/tokenizer_exceptions.py b/spacy/lang/de/tokenizer_exceptions.py
index 4bb59c490..080311f4e 100644
--- a/spacy/lang/de/tokenizer_exceptions.py
+++ b/spacy/lang/de/tokenizer_exceptions.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import ORTH, LEMMA, TAG, NORM
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, TAG, NORM
+from ...deprecated import PRON_LEMMA


 _exc = {
diff --git a/spacy/lang/en/__init__.py b/spacy/lang/en/__init__.py
index d6858c799..a44528da0 100644
--- a/spacy/lang/en/__init__.py
+++ b/spacy/lang/en/__init__.py
@@ -7,10 +7,10 @@ from .stop_words import STOP_WORDS
 from .morph_rules import MORPH_RULES
 from .lemmatizer import LEMMA_RULES, LEMMA_INDEX, LEMMA_EXC

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc


 class English(Language):
diff --git a/spacy/lang/en/morph_rules.py b/spacy/lang/en/morph_rules.py
index 8e1c9e082..4e95dc747 100644
--- a/spacy/lang/en/morph_rules.py
+++ b/spacy/lang/en/morph_rules.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import LEMMA
-from ..deprecated import PRON_LEMMA
+from ...symbols import LEMMA
+from ...deprecated import PRON_LEMMA


 MORPH_RULES = {
diff --git a/spacy/lang/en/tag_map.py b/spacy/lang/en/tag_map.py
index 92c171904..a674c17e3 100644
--- a/spacy/lang/en/tag_map.py
+++ b/spacy/lang/en/tag_map.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import POS, PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, X, VERB
-from ..symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON
+from ...symbols import POS, PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, X, VERB
+from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON


 TAG_MAP = {
diff --git a/spacy/lang/en/tokenizer_exceptions.py b/spacy/lang/en/tokenizer_exceptions.py
index 2c197db2c..5c6e3f893 100644
--- a/spacy/lang/en/tokenizer_exceptions.py
+++ b/spacy/lang/en/tokenizer_exceptions.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import ORTH, LEMMA, TAG, NORM
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, TAG, NORM
+from ...deprecated import PRON_LEMMA


 _exc = {}
diff --git a/spacy/lang/es/__init__.py b/spacy/lang/es/__init__.py
index f975bd11a..6b56982ba 100644
--- a/spacy/lang/es/__init__.py
+++ b/spacy/lang/es/__init__.py
@@ -6,11 +6,11 @@ from .tag_map import TAG_MAP
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc


 class Spanish(Language):
diff --git a/spacy/lang/es/tokenizer_exceptions.py b/spacy/lang/es/tokenizer_exceptions.py
index 524977c73..262089494 100644
--- a/spacy/lang/es/tokenizer_exceptions.py
+++ b/spacy/lang/es/tokenizer_exceptions.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import ORTH, LEMMA, TAG, NORM, ADP, DET
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, TAG, NORM, ADP, DET
+from ...deprecated import PRON_LEMMA


 _exc = {
diff --git a/spacy/lang/fi/__init__.py b/spacy/lang/fi/__init__.py
index ae33e77dd..8cb6ad8ab 100644
--- a/spacy/lang/fi/__init__.py
+++ b/spacy/lang/fi/__init__.py
@@ -4,10 +4,10 @@ from __future__ import unicode_literals
 from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .stop_words import STOP_WORDS

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc


 class Finnish(Language):
diff --git a/spacy/lang/fi/tokenizer_exceptions.py b/spacy/lang/fi/tokenizer_exceptions.py
index 3dcc55024..a5e18bcfa 100644
--- a/spacy/lang/fi/tokenizer_exceptions.py
+++ b/spacy/lang/fi/tokenizer_exceptions.py
@@ -1,7 +1,7 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import ORTH, LEMMA
+from ...symbols import ORTH, LEMMA


 _exc = {}
diff --git a/spacy/lang/fr/__init__.py b/spacy/lang/fr/__init__.py
index e0481187d..a8a18a601 100644
--- a/spacy/lang/fr/__init__.py
+++ b/spacy/lang/fr/__init__.py
@@ -6,11 +6,11 @@ from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc


 class French(Language):
diff --git a/spacy/lang/fr/punctuation.py b/spacy/lang/fr/punctuation.py
index 3047eaffe..935e4c964 100644
--- a/spacy/lang/fr/punctuation.py
+++ b/spacy/lang/fr/punctuation.py
@@ -1,9 +1,9 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..language_data.punctuation import ALPHA, TOKENIZER_INFIXES, LIST_PUNCT
-from ..language_data.punctuation import LIST_ELLIPSES, LIST_QUOTES, CURRENCY
-from ..language_data.punctuation import UNITS, ALPHA_LOWER, QUOTES, ALPHA_UPPER
+from ..char_classes import TOKENIZER_INFIXES, LIST_PUNCT, LIST_ELLIPSES
+from ..char_classes import LIST_QUOTES, CURRENCY, QUOTES, UNITS
+from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER


 ELISION = " ' ’ ".strip().replace(' ', '').replace('\n', '')
diff --git a/spacy/lang/fr/tokenizer_exceptions.py b/spacy/lang/fr/tokenizer_exceptions.py
index e133fa8ef..2f6f71127 100644
--- a/spacy/lang/fr/tokenizer_exceptions.py
+++ b/spacy/lang/fr/tokenizer_exceptions.py
@@ -3,12 +3,12 @@ from __future__ import unicode_literals

 import regex as re

-from .punctuation import ELISION, HYPHENS
 from ._tokenizer_exceptions_list import FR_BASE_EXCEPTIONS
-from ..symbols import ORTH, LEMMA, TAG, NORM
-from ..deprecated import PRON_LEMMA
-from ..language_data.tokenizer_exceptions import _URL_PATTERN
-from ..language_data.punctuation import ALPHA_LOWER
+from .punctuation import ELISION, HYPHENS
+from ..tokenizer_exceptions import URL_PATTERN
+from ..char_classes import ALPHA_LOWER
+from ...symbols import ORTH, LEMMA, TAG, NORM
+from ...deprecated import PRON_LEMMA


 def upper_first_letter(text):
diff --git a/spacy/lang/he/__init__.py b/spacy/lang/he/__init__.py
index 839d174a0..4ed1f30d0 100644
--- a/spacy/lang/he/__init__.py
+++ b/spacy/lang/he/__init__.py
@@ -3,10 +3,10 @@ from __future__ import unicode_literals

 from .stop_words import STOP_WORDS

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc


 class Hebrew(Language):
diff --git a/spacy/lang/hu/__init__.py b/spacy/lang/hu/__init__.py
index 016f51d6f..7233239da 100644
--- a/spacy/lang/hu/__init__.py
+++ b/spacy/lang/hu/__init__.py
@@ -6,11 +6,11 @@ from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIX
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc


 class Hungarian(Language):
diff --git a/spacy/lang/hu/punctuation.py b/spacy/lang/hu/punctuation.py
index b14767e92..474a6dc47 100644
--- a/spacy/lang/hu/punctuation.py
+++ b/spacy/lang/hu/punctuation.py
@@ -1,9 +1,6 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..language_data.punctuation import ALPHA_LOWER, LIST_ELLIPSES, QUOTES
-from ..language_data.punctuation import ALPHA_UPPER, LIST_QUOTES, UNITS
-from ..language_data.punctuation import CURRENCY, LIST_PUNCT, ALPHA, _QUOTES


 _currency_symbols = r"\$ ¢ £ € ¥ ฿"
@@ -38,3 +35,6 @@ TOKENIZER_INFIXES = (
     r'(?<=[{a}])--(?=[{a}])'.format(a=ALPHA),
     r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
     r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=_QUOTES.replace("'", "").strip().replace(" ", ""))])
+from ..char_classes import TOKENIZER_INFIXES, LIST_PUNCT, LIST_ELLIPSES
+from ..char_classes import LIST_QUOTES, CURRENCY, QUOTES, UNITS
+from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
diff --git a/spacy/lang/hu/tokenizer_exceptions.py b/spacy/lang/hu/tokenizer_exceptions.py
index 87b10a3ad..043839923 100644
--- a/spacy/lang/hu/tokenizer_exceptions.py
+++ b/spacy/lang/hu/tokenizer_exceptions.py
@@ -3,9 +3,10 @@ from __future__ import unicode_literals

 import regex as re

-from ..symbols import ORTH
-from ..language_data.punctuation import ALPHA_LOWER, CURRENCY
-from ..language_data.tokenizer_exceptions import _URL_PATTERN
+from ..punctuation import ALPHA_LOWER, CURRENCY
+from ..tokenizer_exceptions import URL_PATTERN
+from ...symbols import ORTH
+


 _exc = {}
diff --git a/spacy/lang/it/__init__.py b/spacy/lang/it/__init__.py
index e7fdf75ce..93f7f7764 100644
--- a/spacy/lang/it/__init__.py
+++ b/spacy/lang/it/__init__.py
@@ -4,11 +4,11 @@ from __future__ import unicode_literals
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc


 class Italian(Language):
diff --git a/spacy/lang/ja/__init__.py b/spacy/lang/ja/__init__.py
index c55f67e47..09ad9945e 100644
--- a/spacy/lang/ja/__init__.py
+++ b/spacy/lang/ja/__init__.py
@@ -1,9 +1,9 @@
 # encoding: utf8
 from __future__ import unicode_literals, print_function

-from ..language import Language
-from ..attrs import LANG
-from ..tokens import Doc
+from ...language import Language
+from ...attrs import LANG
+from ...tokens import Doc


 class Japanese(Language):
diff --git a/spacy/lang/nb/__init__.py b/spacy/lang/nb/__init__.py
index 72eb5ad81..20832bfe3 100644
--- a/spacy/lang/nb/__init__.py
+++ b/spacy/lang/nb/__init__.py
@@ -5,10 +5,10 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .stop_words import STOP_WORDS
 from .morph_rules import MORPH_RULES

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc


 class Norwegian(Language):
diff --git a/spacy/lang/nb/morph_rules.py b/spacy/lang/nb/morph_rules.py
index f41d2efe2..924c9beb5 100644
--- a/spacy/lang/nb/morph_rules.py
+++ b/spacy/lang/nb/morph_rules.py
@@ -1,8 +1,8 @@
 # encoding: utf8
 from __future__ import unicode_literals

-from ..symbols import LEMMA
-from ..deprecated import PRON_LEMMA
+from ...symbols import LEMMA
+from ...deprecated import PRON_LEMMA


 # Used the table of pronouns at https://no.wiktionary.org/wiki/Tillegg:Pronomen_i_norsk
diff --git a/spacy/lang/nb/tokenizer_exceptions.py b/spacy/lang/nb/tokenizer_exceptions.py
index e87aefd46..a01c1363c 100644
--- a/spacy/lang/nb/tokenizer_exceptions.py
+++ b/spacy/lang/nb/tokenizer_exceptions.py
@@ -1,7 +1,7 @@
 # encoding: utf8
 from __future__ import unicode_literals

-from ..symbols import ORTH, LEMMA
+from ...symbols import ORTH, LEMMA


 _exc = {}
diff --git a/spacy/lang/nl/__init__.py b/spacy/lang/nl/__init__.py
index 446d920e6..254849ad0 100644
--- a/spacy/lang/nl/__init__.py
+++ b/spacy/lang/nl/__init__.py
@@ -3,10 +3,10 @@ from __future__ import unicode_literals

 from .stop_words import STOP_WORDS

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc


 class Dutch(Language):
diff --git a/spacy/lang/pt/__init__.py b/spacy/lang/pt/__init__.py
index 0d68cf393..976f7e08b 100644
--- a/spacy/lang/pt/__init__.py
+++ b/spacy/lang/pt/__init__.py
@@ -5,11 +5,11 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc


 class Portuguese(Language):
diff --git a/spacy/lang/pt/tokenizer_exceptions.py b/spacy/lang/pt/tokenizer_exceptions.py
index 087014ca1..72348fa64 100644
--- a/spacy/lang/pt/tokenizer_exceptions.py
+++ b/spacy/lang/pt/tokenizer_exceptions.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import ORTH, LEMMA, NORM
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, NORM
+from ...deprecated import PRON_LEMMA


 _exc = {
diff --git a/spacy/lang/sv/__init__.py b/spacy/lang/sv/__init__.py
index 5235b530f..b16e1befc 100644
--- a/spacy/lang/sv/__init__.py
+++ b/spacy/lang/sv/__init__.py
@@ -6,11 +6,11 @@ from .stop_words import STOP_WORDS
 from .morph_rules import MORPH_RULES
 from .lemmatizer import LEMMA_RULES, LOOKUP

-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc


 class Swedish(Language):
diff --git a/spacy/lang/sv/morph_rules.py b/spacy/lang/sv/morph_rules.py
index 0c3b1804e..2875eb3c8 100644
--- a/spacy/lang/sv/morph_rules.py
+++ b/spacy/lang/sv/morph_rules.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import LEMMA
-from ..deprecated import PRON_LEMMA
+from ...symbols import LEMMA
+from ...deprecated import PRON_LEMMA


 # Used the table of pronouns at https://sv.wiktionary.org/wiki/deras
diff --git a/spacy/lang/sv/tokenizer_exceptions.py b/spacy/lang/sv/tokenizer_exceptions.py
index 05d7e81e4..b7d9834fe 100644
--- a/spacy/lang/sv/tokenizer_exceptions.py
+++ b/spacy/lang/sv/tokenizer_exceptions.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..symbols import ORTH, LEMMA, TAG, NORM
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, TAG, NORM
+from ...deprecated import PRON_LEMMA


 _exc = {}
diff --git a/spacy/lang/zh/__init__.py b/spacy/lang/zh/__init__.py
index af5879fde..d63323b4e 100644
--- a/spacy/lang/zh/__init__.py
+++ b/spacy/lang/zh/__init__.py
@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals

-from ..language import Language
-from ..tokens import Doc
+from ...language import Language
+from ...tokens import Doc


 class Chinese(Language):