From 2912ddc9a60ebcb11d3bcfc0747c6089d8382a00 Mon Sep 17 00:00:00 2001
From: Ines Montani
Date: Tue, 12 Mar 2019 13:30:33 +0100
Subject: [PATCH] Don't set extension attribute in Japanese (closes #3398)

---
 spacy/lang/ja/__init__.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/spacy/lang/ja/__init__.py b/spacy/lang/ja/__init__.py
index e35967409..3a6074bba 100644
--- a/spacy/lang/ja/__init__.py
+++ b/spacy/lang/ja/__init__.py
@@ -8,16 +8,13 @@ from .stop_words import STOP_WORDS
 from .tag_map import TAG_MAP
 from ...attrs import LANG
 from ...language import Language
-from ...tokens import Doc, Token
+from ...tokens import Doc
 from ...compat import copy_reg
 from ...util import DummyTokenizer
 
 
 ShortUnitWord = namedtuple("ShortUnitWord", ["surface", "lemma", "pos"])
 
-# TODO: Is this the right place for this?
-Token.set_extension("mecab_tag", default=None)
-
 
 def try_mecab_import():
     """Mecab is required for Japanese support, so check for it.
@@ -82,10 +79,12 @@ class JapaneseTokenizer(DummyTokenizer):
         words = [x.surface for x in dtokens]
         spaces = [False] * len(words)
         doc = Doc(self.vocab, words=words, spaces=spaces)
+        mecab_tags = []
         for token, dtoken in zip(doc, dtokens):
-            token._.mecab_tag = dtoken.pos
+            mecab_tags.append(dtoken.pos)
             token.tag_ = resolve_pos(dtoken)
             token.lemma_ = dtoken.lemma
+        doc.user_data["mecab_tags"] = mecab_tags
         return doc
 
 
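With this change, the raw MeCab part-of-speech tags are stored as a list in
doc.user_data["mecab_tags"] (parallel to the Doc's tokens) instead of on a
custom Token._.mecab_tag extension attribute. A minimal usage sketch of the
new behavior, assuming spaCy with MeCab and its Python bindings installed:

    import spacy

    # spacy.blank("ja") builds the Japanese pipeline, which requires MeCab
    nlp = spacy.blank("ja")
    doc = nlp("日本語の文章です。")

    # The raw MeCab tags live on the Doc, one entry per token
    for token, mecab_tag in zip(doc, doc.user_data["mecab_tags"]):
        print(token.text, token.tag_, mecab_tag)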