# coding: utf8
from __future__ import unicode_literals

# Shared, language-independent data and helpers live one package up; the
# language-specific pieces are imported from sibling modules below.
from .. import language_data as base
from ..language_data import update_exc, strings_to_exc

from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, ORTH_ONLY


# Copy into fresh containers so the updates below don't mutate the imported
# source data.
STOP_WORDS = set(STOP_WORDS)

TOKENIZER_EXCEPTIONS = dict(TOKENIZER_EXCEPTIONS)
# strings_to_exc() turns each plain string into a one-token special case;
# update_exc() merges those into the language-specific exceptions.
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(ORTH_ONLY))
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(base.EMOTICONS))
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(base.ABBREVIATIONS))


__all__ = ["TOKENIZER_EXCEPTIONS", "STOP_WORDS"]
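
# A minimal usage sketch, not part of the original module: in the spaCy
# 1.x-era layout this file appears to follow, the language's __init__.py
# would typically wire these symbols into a Language subclass. The class
# name and language code below are hypothetical placeholders.
#
#     from ..language import Language
#
#     class SomeLanguage(Language):
#         lang = 'xx'  # hypothetical language code
#
#         class Defaults(Language.Defaults):
#             tokenizer_exceptions = TOKENIZER_EXCEPTIONS
#             stop_words = STOP_WORDS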