mirror of https://github.com/explosion/spaCy.git
Work on draft Italian tokenizer
This commit is contained in:
parent 7555aa5e63
commit 19c1e83d3d
@@ -3,7 +3,25 @@ from __future__ import unicode_literals, print_function
 from os import path
 
 from ..language import Language
+from ..attrs import LANG
+from . import language_data
 
 
 class Italian(Language):
-    pass
+    lang = 'it'
+
+    class Defaults(Language.Defaults):
+        tokenizer_exceptions = dict(language_data.TOKENIZER_EXCEPTIONS)
+        lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
+        lex_attr_getters[LANG] = lambda text: 'it'
+
+        prefixes = tuple(language_data.TOKENIZER_PREFIXES)
+
+        suffixes = tuple(language_data.TOKENIZER_SUFFIXES)
+
+        infixes = tuple(language_data.TOKENIZER_INFIXES)
+
+        tag_map = dict(language_data.TAG_MAP)
+
+        stop_words = set(language_data.STOP_WORDS)
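For readers trying the draft locally, a minimal smoke test might look like the sketch below. It is not part of the commit: it assumes the module above lives at spacy/it/__init__.py, that the language_data constants it references already exist, and that the class can be constructed with the spaCy 1.x-era API; the sample sentence is only an illustration.

# Hypothetical smoke test for the draft Italian tokenizer (assumptions noted above).
from spacy.it import Italian

nlp = Italian()                        # 1.x-era constructor; may attempt to load model data
doc = nlp(u"Com'è bella la città!")    # apostrophes exercise the tokenizer exceptions
print([token.orth_ for token in doc])  # tokens produced by the draft rules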