# spaCy/spacy/lang/id/tokenizer_exceptions.py
# coding: utf8
from __future__ import unicode_literals
import regex as re
from ._tokenizer_exceptions_list import ID_BASE_EXCEPTIONS
from ..tokenizer_exceptions import URL_PATTERN
from ..char_classes import ALPHA
from ...symbols import ORTH
# Register every base exception together with its Title, UPPER and lower
# cased variants, so each casing survives tokenization as a single token.
_exc = {}
for orth in ID_BASE_EXCEPTIONS:
    _exc[orth] = [{ORTH: orth}]

    for variant in (orth.title(), orth.upper(), orth.lower()):
        _exc[variant] = [{ORTH: variant}]

    if '-' in orth:
        # Hyphenated exceptions additionally get part-wise Title/UPPER
        # casings built from the individual hyphen-separated pieces.
        pieces = orth.split('-')
        part_title = '-'.join(piece.title() for piece in pieces)
        _exc[part_title] = [{ORTH: part_title}]
        part_upper = '-'.join(piece.upper() for piece in pieces)
        _exc[part_upper] = [{ORTH: part_upper}]
_hyphen_prefix = """abdur abdus abou aboul abror abshar abu abubakar abul
aero agri agro ahmadi ahmed air abd abdel abdul ad adz afro al ala ali all
2017-07-26 12:13:47 +00:00
amir an antar anti ar as ash asy at ath az bekas ber best bi co di double
dual duo e eco eks el era ex full hi high i in inter intra ke kontra korona
kuartal lintas m macro makro me mem meng micro mid mikro mini multi neo nge
no non on pan pasca pe pem poli poly post pra pre pro re se self serba seri
2017-07-26 12:28:57 +00:00
sub super t trans ultra un x""".split()
2017-07-26 12:13:47 +00:00
_hyphen_infix = """me-kan me-kannya men-kan men-kannya meng-kannya ke-an
ke-annya di-kan di-kannya de-isasi ber-an berke-an""".split()
2017-07-26 12:28:57 +00:00
_hyphen_suffix = """el"""
# Regex patterns that keep productive hyphenated forms together as a
# single token (e.g. "anti-korupsi", "me-kan" circumfix forms, "-el").
#
# BUG FIX: the original used `=` instead of `+=` when building the second
# and fourth lists, silently discarding every previously built pattern, so
# only the suffix patterns (plus the URL pattern) ever reached TOKEN_MATCH.
# Its patterns were also broken: '-*' matches only a run of hyphens (so
# "anti-korupsi" could never match), and '^*' is an invalid expression
# ("nothing to repeat"). '[A-Za-z0-9]+' now stands in for the attached
# word part. Title-cased duplicates are dropped because the combined
# pattern is compiled with re.IGNORECASE and they matched the same set.
_regular_exp = ['^{p}-[A-Za-z0-9]+$'.format(p=prefix) for prefix in _hyphen_prefix]
_regular_exp += ['^{0}-[A-Za-z0-9]+-{1}$'.format(*infix.split('-')) for infix in _hyphen_infix]
_regular_exp += ['^[A-Za-z0-9]+-{s}$'.format(s=suffix) for suffix in _hyphen_suffix]

# URLs are also matched as single tokens.
_regular_exp.append(URL_PATTERN)

TOKENIZER_EXCEPTIONS = dict(_exc)

# TOKEN_MATCH short-circuits the tokenizer for any string matching one of
# the alternatives above, case-insensitively.
TOKEN_MATCH = re.compile('|'.join('(?:{})'.format(m) for m in _regular_exp),
                         re.IGNORECASE).match