# encoding: utf8
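# Tokenizer exceptions and stop words for this language, assembled from the
# shared language_data helpers.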
from __future__ import unicode_literals
from .. import language_data as base
from ..language_data import update_exc, strings_to_exc
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, ORTH_ONLY


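# Copy the imported data so the in-place updates below extend this module's
# own exception table and stop-word set without mutating the originals.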
TOKENIZER_EXCEPTIONS = dict(TOKENIZER_EXCEPTIONS)
STOP_WORDS = set(STOP_WORDS)
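# strings_to_exc wraps each plain string as a single-token exception entry;
# update_exc merges those entries into the exception table. This pulls in the
# language's orth-only exceptions and the shared emoticon list.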
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(ORTH_ONLY))
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(base.EMOTICONS))


__all__ = ["TOKENIZER_EXCEPTIONS", "STOP_WORDS"]
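
# A minimal sketch of how these exports are typically consumed by a spaCy 1.x
# language class (the class name "Xxxxx" and the "xx" lang code are
# placeholders, not taken from this module):
#
#     from ..language import Language
#     from .language_data import STOP_WORDS, TOKENIZER_EXCEPTIONS
#
#     class Xxxxx(Language):
#         lang = "xx"  # placeholder ISO language code
#
#         class Defaults(Language.Defaults):
#             tokenizer_exceptions = TOKENIZER_EXCEPTIONS
#             stop_words = STOP_WORDS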