# coding: utf8
from __future__ import unicode_literals, print_function

from .tokenizer_exceptions import TOKEN_MATCH
# Wildcard import pulls in the language data referenced below:
# TOKENIZER_EXCEPTIONS, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES,
# TOKENIZER_INFIXES and STOP_WORDS.
from .language_data import *
from ..attrs import LANG
from ..language import Language


class Hungarian(Language):
    lang = 'hu'

    # Defaults bundles the language-specific data spaCy uses to construct
    # the tokenizer and vocab for this language.
    class Defaults(Language.Defaults):
        tokenizer_exceptions = dict(TOKENIZER_EXCEPTIONS)
        # Copy the shared lexical attribute getters, then override LANG so
        # that vocab entries created by this class are marked as 'hu'.
        lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
        lex_attr_getters[LANG] = lambda text: 'hu'

        # Affix rules the tokenizer uses to split punctuation off tokens.
        prefixes = tuple(TOKENIZER_PREFIXES)
        suffixes = tuple(TOKENIZER_SUFFIXES)
        infixes = tuple(TOKENIZER_INFIXES)

        stop_words = set(STOP_WORDS)

        # Strings matched by TOKEN_MATCH (imported from tokenizer_exceptions)
        # are kept as single tokens by the tokenizer rather than split.
        token_match = TOKEN_MATCH
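

# A minimal usage sketch, not part of the original module. It assumes the
# spaCy 1.x API, where a language class is instantiated directly and loads
# its data from spaCy's data directory (which must be installed for 'hu').
if __name__ == '__main__':
    # Instantiating the class wires the Defaults above into the tokenizer
    # and vocab.
    nlp = Hungarian()
    # Hungarian for "The apple doesn't fall far from its tree."
    doc = nlp('Az alma nem esik messze a fájától.')
    print([token.orth_ for token in doc])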