2017-06-26 21:40:04 +00:00
|
|
|
# encoding: utf8
|
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2018-03-27 17:23:02 +00:00
|
|
|
from ...symbols import ORTH, LEMMA, POS, ADV, ADJ, NOUN
|
2017-06-26 21:40:04 +00:00
|
|
|
|
|
|
|
|
|
|
|
_exc = {}
|
|
|
|
|
|
|
|
# Polish abbreviations that expand to a multi-word lemma and carry a
# part-of-speech tag. Each becomes a single-token exception.
_abbrev_with_lemma = (
    {ORTH: "m.in.", LEMMA: "między innymi", POS: ADV},
    {ORTH: "inż.", LEMMA: "inżynier", POS: NOUN},
    {ORTH: "mgr.", LEMMA: "magister", POS: NOUN},
    {ORTH: "tzn.", LEMMA: "to znaczy", POS: ADV},
    {ORTH: "tj.", LEMMA: "to jest", POS: ADV},
    {ORTH: "tzw.", LEMMA: "tak zwany", POS: ADJ},
)

for abbrev in _abbrev_with_lemma:
    # One exception per surface form; the value is a one-token analysis.
    _exc[abbrev[ORTH]] = [abbrev]
|
2017-06-26 21:40:04 +00:00
|
|
|
|
|
|
|
# Abbreviations with no lemma/POS annotation — only the surface form is
# registered so the tokenizer keeps the trailing period attached.
for surface in ("w.", "r."):
    _exc[surface] = [{ORTH: surface}]
|
|
|
|
|
|
|
|
|
2017-10-31 20:05:29 +00:00
|
|
|
# Public export consumed by the language class; must reference the dict
# built above (not a copy), matching the convention of other spaCy
# language-data modules.
TOKENIZER_EXCEPTIONS = _exc
|