# coding: utf8
from __future__ import unicode_literals

from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .tag_map import TAG_MAP
from .stop_words import STOP_WORDS
from .lemmatizer import LOOKUP
from .syntax_iterators import SYNTAX_ITERATORS

from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...language import Language
from ...lemmatizerlookup import Lemmatizer
from ...attrs import LANG
from ...util import update_exc


class German(Language):
    lang = 'de'

    class Defaults(Language.Defaults):
        lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
        lex_attr_getters[LANG] = lambda text: 'de'

        tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
        tag_map = dict(TAG_MAP)
        stop_words = set(STOP_WORDS)
        syntax_iterators = dict(SYNTAX_ITERATORS)

        @classmethod
        def create_lemmatizer(cls, nlp=None):
            return Lemmatizer(LOOKUP)


__all__ = ['German']
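
# Usage sketch (illustrative, not part of the original module). Assuming the
# spaCy 2.x-style package layout implied by the relative imports above
# (spacy/lang/de/__init__.py), the German class can be instantiated directly
# to get a blank pipeline that tokenizes text with the rules defined here:
#
#     from spacy.lang.de import German
#
#     nlp = German()
#     doc = nlp(u'Dies ist ein Satz.')
#     print([token.text for token in doc])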