# coding: utf8
# Tokenizer exceptions for Luxembourgish (lb).
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH, LEMMA, NORM
from ...util import update_exc

# TODO: treat other apostrophes within words as part of the word:
# [op d'mannst], [fir d'éischt] (= exceptions)

# Maps each surface form (ORTH) to a list of token attribute dicts so the
# tokenizer keeps these abbreviations and elided articles as single tokens,
# while still assigning the right lemma and norm.
_exc = {}

# TODO: translate / delete what is not necessary
for exc_data in [
    # Elided forms of "et" ("it"), with and without the apostrophe.
    {ORTH: "t", LEMMA: "et", NORM: "et"},
    {ORTH: "T", LEMMA: "et", NORM: "et"},
    {ORTH: "'t", LEMMA: "et", NORM: "et"},
    {ORTH: "'T", LEMMA: "et", NORM: "et"},
    # Common abbreviations with explicit lemma/norm expansions.
    {ORTH: "wgl.", LEMMA: "wannechgelift", NORM: "wannechgelift"},
    {ORTH: "M.", LEMMA: "Monsieur", NORM: "Monsieur"},
    {ORTH: "Mme.", LEMMA: "Madame", NORM: "Madame"},
    {ORTH: "Dr.", LEMMA: "Dokter", NORM: "Dokter"},
    {ORTH: "Tel.", LEMMA: "Telefon", NORM: "Telefon"},
    {ORTH: "asw.", LEMMA: "an sou weider", NORM: "an sou weider"},
    {ORTH: "etc.", LEMMA: "et cetera", NORM: "et cetera"},
    {ORTH: "bzw.", LEMMA: "bezéiungsweis", NORM: "bezéiungsweis"},
    {ORTH: "Jan.", LEMMA: "Januar", NORM: "Januar"},
]:
    # Key on the surface form; the value is a one-token analysis.
    _exc[exc_data[ORTH]] = [exc_data]
# Abbreviations kept as single tokens with no lemma/norm override.
# To be extended.
for orth in [
    "z.B.",
    "Dipl.",
    "Dr.",
    "etc.",
    "i.e.",
    "o.k.",
    "O.K.",
    "p.a.",
    "p.s.",
    "P.S.",
    "phil.",
    "q.e.d.",
    "R.I.P.",
    "rer.",
    "sen.",
    "ë.a.",
    "U.S.",
    "U.S.A.",
]:
    _exc[orth] = [{ORTH: orth}]

# Merge the language-specific exceptions on top of the shared base set;
# entries in _exc override BASE_EXCEPTIONS on key collisions (e.g. "etc.").
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)