spaCy/spacy/lang/zh/__init__.py

# coding: utf8
from __future__ import unicode_literals
from ...attrs import LANG
from ...language import Language
from ...tokens import Doc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .stop_words import STOP_WORDS


class ChineseDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: "zh"
    # Segment with Jieba by default; set to False to fall back to the
    # character-level tokenization in Chinese.make_doc below.
    use_jieba = True
    tokenizer_exceptions = BASE_EXCEPTIONS
    stop_words = STOP_WORDS
    writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}


class Chinese(Language):
    lang = "zh"
    Defaults = ChineseDefaults  # override defaults

    def make_doc(self, text):
        if self.Defaults.use_jieba:
            # Jieba is an optional dependency, so import it lazily and raise
            # a helpful error if it's missing.
            try:
                import jieba
            except ImportError:
                msg = (
                    "Jieba not installed. Either set Chinese.use_jieba = False, "
                    "or install it https://github.com/fxsjy/jieba"
                )
                raise ImportError(msg)
            words = list(jieba.cut(text, cut_all=False))
            words = [x for x in words if x]
            return Doc(self.vocab, words=words, spaces=[False] * len(words))
        else:
            # Fall back to one token per character: run the default tokenizer
            # and split each of its tokens into individual characters, keeping
            # any trailing whitespace on the last character of each token.
            words = []
            spaces = []
            for token in self.tokenizer(text):
                words.extend(list(token.text))
                spaces.extend([False] * len(token.text))
                spaces[-1] = bool(token.whitespace_)
            return Doc(self.vocab, words=words, spaces=spaces)


__all__ = ["Chinese"]
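# Usage sketch (illustrative only, not part of the original module; assumes
# the optional `jieba` package is installed and this file is importable as
# spacy.lang.zh; the example text is arbitrary):
#
#     from spacy.lang.zh import Chinese
#
#     nlp = Chinese()
#     doc = nlp.make_doc("我爱自然语言处理")  # words segmented by Jieba
#     print([t.text for t in doc])
#
#     # Disable Jieba to get one token per character instead:
#     Chinese.Defaults.use_jieba = False
#     doc = nlp.make_doc("我爱自然语言处理")
#     print([t.text for t in doc])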