# coding: utf8
from __future__ import unicode_literals

from ...symbols import ORTH, LEMMA, NORM
from ...deprecated import PRON_LEMMA


# Hand-written exceptions for Portuguese contractions of "a" with a
# following article/pronoun.  Each surface form maps to its subtoken
# splits; NORM records the uncontracted spelling where it differs from
# the subtoken's ORTH (e.g. "à" normalises to "a").
_exc = {
    "às": [{ORTH: "à", NORM: "a"}, {ORTH: "s", NORM: "as"}],
    "ao": [{ORTH: "a"}, {ORTH: "o"}],
    "aos": [{ORTH: "a"}, {ORTH: "os"}],
    "àquele": [{ORTH: "à", NORM: "a"}, {ORTH: "quele", NORM: "aquele"}],
    "àquela": [{ORTH: "à", NORM: "a"}, {ORTH: "quela", NORM: "aquela"}],
    "àqueles": [{ORTH: "à", NORM: "a"}, {ORTH: "queles", NORM: "aqueles"}],
    "àquelas": [{ORTH: "à", NORM: "a"}, {ORTH: "quelas", NORM: "aquelas"}],
    "àquilo": [{ORTH: "à", NORM: "a"}, {ORTH: "quilo", NORM: "aquilo"}],
    "aonde": [{ORTH: "a"}, {ORTH: "onde"}],
}
# Contractions
|
|
|
|
|
|
|
|
_per_pron = ["ele", "ela", "eles", "elas"]
|
|
|
|
_dem_pron = ["este", "esta", "estes", "estas", "isto", "esse", "essa", "esses",
|
|
|
|
"essas", "isso", "aquele", "aquela", "aqueles", "aquelas", "aquilo"]
|
|
|
|
_und_pron = ["outro", "outra", "outros", "outras"]
|
|
|
|
_adv = ["aqui", "aí", "ali", "além"]
|
|
|
|
|
|
|
|
|
|
|
|
# "de" contracts with all four word classes (de + ele -> "dele", etc.);
# the "d" subtoken carries NORM "de" so the full form stays recoverable.
for word in _per_pron + _dem_pron + _und_pron + _adv:
    _exc["d" + word] = [{ORTH: "d", NORM: "de"}, {ORTH: word}]
# "em" contracts with the pronoun classes but not the adverbs
# (em + ele -> "nele", etc.); NORM "em" preserves the full preposition.
for word in _per_pron + _dem_pron + _und_pron:
    _exc["n" + word] = [{ORTH: "n", NORM: "em"}, {ORTH: word}]
# Period-bearing abbreviations and acronyms: register each as a single
# token so the tokenizer does not split on the internal/trailing periods.
for abbrev in (
        "Adm.", "Dr.", "e.g.", "E.g.", "E.G.", "Gen.", "Gov.", "i.e.",
        "I.e.", "I.E.", "Jr.", "Ltd.", "p.m.", "Ph.D.", "Rep.", "Rev.",
        "Sen.", "Sr.", "Sra.", "vs."):
    _exc[abbrev] = [{ORTH: abbrev}]
# Public export: a shallow copy of the accumulated exception table.
TOKENIZER_EXCEPTIONS = _exc.copy()