# spaCy/spacy/it/__init__.py

# encoding: utf8
from __future__ import unicode_literals, print_function

from ..language import Language
from ..attrs import LANG
from ..language_data import update_exc
from ..language_data import strings_to_exc
from ..language_data import EMOTICONS

from . import language_data
from .language_data import ORTH_ONLY

# Copy the Italian language data into module-level constants. dict(), tuple()
# and set() take copies, so the tables in language_data are not mutated by
# the update_exc() calls below.
TOKENIZER_EXCEPTIONS = dict(language_data.TOKENIZER_EXCEPTIONS)
TOKENIZER_PREFIXES = tuple(language_data.TOKENIZER_PREFIXES)
TOKENIZER_SUFFIXES = tuple(language_data.TOKENIZER_SUFFIXES)
TOKENIZER_INFIXES = tuple(language_data.TOKENIZER_INFIXES)
TAG_MAP = dict(language_data.TAG_MAP)
STOP_WORDS = set(language_data.STOP_WORDS)

# Merge the shared emoticon strings and the Italian orth-only strings into
# the tokenizer exceptions.
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(EMOTICONS))
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(ORTH_ONLY))


class Italian(Language):
    lang = 'it'

    class Defaults(Language.Defaults):
        # Copy the shared lexical attribute getters and pin the LANG
        # attribute so every lexeme is tagged as Italian.
        lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
        lex_attr_getters[LANG] = lambda text: 'it'

        tokenizer_exceptions = TOKENIZER_EXCEPTIONS
        prefixes = TOKENIZER_PREFIXES
        suffixes = TOKENIZER_SUFFIXES
        infixes = TOKENIZER_INFIXES
        tag_map = TAG_MAP
        stop_words = STOP_WORDS
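

# Usage sketch (illustrative, not part of the original module): assuming the
# Italian language data is installed locally, the class behaves like any
# other spaCy 1.x Language subclass, e.g.:
#
#     nlp = Italian()
#     doc = nlp(u"L'Italia è un paese bellissimo.")
#     print([token.orth_ for token in doc])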