diff --git a/.github/contributors/Brixjohn.md b/.github/contributors/Brixjohn.md
new file mode 100644
index 000000000..056103fe4
--- /dev/null
+++ b/.github/contributors/Brixjohn.md
@@ -0,0 +1,106 @@
+# spaCy contributor agreement
+
+This spaCy Contributor Agreement (**"SCA"**) is based on the
+[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
+The SCA applies to any contribution that you make to any product or project
+managed by us (the **"project"**), and sets out the intellectual property rights
+you grant to us in the contributed materials. The term **"us"** shall mean
+[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term
+**"you"** shall mean the person or entity identified below.
+
+If you agree to be bound by these terms, fill in the information requested
+below and include the filled-in version with your first pull request, under the
+folder [`.github/contributors/`](/.github/contributors/). The name of the file
+should be your GitHub username, with the extension `.md`. For example, the user
+example_user would create the file `.github/contributors/example_user.md`.
+
+Read this agreement carefully before signing. These terms and conditions
+constitute a binding legal agreement.
+
+## Contributor Agreement
+
+1. The term "contribution" or "contributed materials" means any source code,
+object code, patch, tool, sample, graphic, specification, manual,
+documentation, or any other material posted or submitted by you to the project.
+
+2. With respect to any worldwide copyrights, or copyright applications and
+registrations, in your contribution:
+
+    * you hereby assign to us joint ownership, and to the extent that such
+      assignment is or becomes invalid, ineffective or unenforceable, you hereby
+      grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
+      royalty-free, unrestricted license to exercise all rights under those
+      copyrights. This includes, at our option, the right to sublicense these same
+      rights to third parties through multiple levels of sublicensees or other
+      licensing arrangements;
+
+    * you agree that each of us can do all things in relation to your
+      contribution as if each of us were the sole owners, and if one of us makes
+      a derivative work of your contribution, the one who makes the derivative
+      work (or has it made) will be the sole owner of that derivative work;
+
+    * you agree that you will not assert any moral rights in your contribution
+      against us, our licensees or transferees;
+
+    * you agree that we may register a copyright in your contribution and
+      exercise all ownership rights associated with it; and
+
+    * you agree that neither of us has any duty to consult with, obtain the
+      consent of, pay or render an accounting to the other for any use or
+      distribution of your contribution.
+
+3. With respect to any patents you own, or that you can license without payment
+to any third party, you hereby grant to us a perpetual, irrevocable,
+non-exclusive, worldwide, no-charge, royalty-free license to:
+
+    * make, have made, use, sell, offer to sell, import, and otherwise transfer
+      your contribution in whole or in part, alone or in combination with or
+      included in any product, work or materials arising out of the project to
+      which your contribution was submitted, and
+
+    * at our option, to sublicense these same rights to third parties through
+      multiple levels of sublicensees or other licensing arrangements.
+
+4. Except as set out above, you keep all right, title, and interest in your
+contribution. The rights that you grant to us under these terms are effective
+on the date you first submitted a contribution to us, even if your submission
+took place before the date you sign these terms.
+
+5. You covenant, represent, warrant and agree that:
+
+    * Each contribution that you submit is and shall be an original work of
+      authorship and you can legally grant the rights set out in this SCA;
+
+    * to the best of your knowledge, each contribution will not violate any
+      third party's copyrights, trademarks, patents, or other intellectual
+      property rights; and
+
+    * each contribution shall be in compliance with U.S. export control laws and
+      other applicable export and import laws. You agree to notify us if you
+      become aware of any circumstance which would make any of the foregoing
+      representations inaccurate in any respect. We may publicly disclose your
+      participation in the project, including the fact that you have signed the SCA.
+
+6. This SCA is governed by the laws of the State of California and applicable
+U.S. Federal law. Any choice of law rules will not apply.
+
+7. Please place an “x” on one of the applicable statements below. Please do NOT
+mark both statements:
+
+    * [ ] I am signing on behalf of myself as an individual and no other person
+      or entity, including my employer, has or will have rights with respect to my
+      contributions.
+
+    * [X] I am signing on behalf of my employer or a legal entity and I have the
+      actual authority to contractually bind that entity.
+
+## Contributor Details
+
+| Field                          | Entry                |
+|------------------------------- | -------------------- |
+| Name                           | Brixter John Lumabi  |
+| Company name (if applicable)   | Stratpoint           |
+| Title or role (if applicable)  | Software Developer   |
+| Date                           | 18 December 2018     |
+| GitHub username                | Brixjohn             |
+| Website (optional)             |                      |
diff --git a/spacy/lang/tl/__init__.py b/spacy/lang/tl/__init__.py
new file mode 100644
index 000000000..407e24bc3
--- /dev/null
+++ b/spacy/lang/tl/__init__.py
@@ -0,0 +1,72 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
+from .stop_words import STOP_WORDS
+from .lex_attrs import LEX_ATTRS
+
+# uncomment if files are available
+# from .norm_exceptions import NORM_EXCEPTIONS
+from .tag_map import TAG_MAP
+# from .morph_rules import MORPH_RULES
+
+# lookup-based lemmatizer (table in lemmatizer.py)
+from .lemmatizer import LOOKUP
+from ...lemmatizerlookup import Lemmatizer
+
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ..norm_exceptions import BASE_NORMS
+from ...language import Language
+from ...attrs import LANG, NORM
+from ...util import update_exc, add_lookups
+
+def _return_tl(_):
+    return 'tl'
+
+
+# Create a Language subclass
+# Documentation: https://spacy.io/docs/usage/adding-languages
+
+# This file should be placed in spacy/lang/xx (ISO code of language).
+# Before submitting a pull request, make sure to remove all comments from the
+# language data files, and run at least the basic tokenizer tests. Simply add the
+# language ID to the list of languages in spacy/tests/conftest.py to include it
+# in the basic tokenizer sanity tests. You can optionally add a fixture for the
+# language's tokenizer and add more specific tests. For more info, see the
+# tests documentation: https://github.com/explosion/spaCy/tree/master/spacy/tests
+
+
+class TagalogDefaults(Language.Defaults):
+    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
+    lex_attr_getters[LANG] = _return_tl  # ISO code
+    # add more norm exception dictionaries here
+    lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS)
+
+    # overwrite functions for lexical attributes
+    lex_attr_getters.update(LEX_ATTRS)
+
+    # add custom tokenizer exceptions to base exceptions
+    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
+
+    # add stop words
+    stop_words = STOP_WORDS
+
+    # add tag map
+    tag_map = dict(TAG_MAP)
+
+    # if available: add morph rules
+    # morph_rules = dict(MORPH_RULES)
+
+    # add lookup lemmatizer
+    @classmethod
+    def create_lemmatizer(cls, nlp=None):
+        return Lemmatizer(LOOKUP)
+
+
+class Tagalog(Language):
+    lang = 'tl'  # ISO code
+    Defaults = TagalogDefaults  # set Defaults to custom language defaults
+
+
+# set default export – this allows the language class to be lazy-loaded
+__all__ = ['Tagalog']
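
Reviewer note: a quick smoke test for the new language class once this branch is installed. This is a minimal sketch, not part of the patch; the sample sentence is illustrative, and `spacy.blank('tl')` should resolve to the same `Tagalog` class through the lazy-loading export above:

    from spacy.lang.tl import Tagalog

    nlp = Tagalog()  # equivalent: spacy.blank('tl')
    doc = nlp("Limang libo ang bayad.")
    print([token.text for token in doc])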
diff --git a/spacy/lang/tl/lemmatizer.py b/spacy/lang/tl/lemmatizer.py
new file mode 100644
index 000000000..57045a6a4
--- /dev/null
+++ b/spacy/lang/tl/lemmatizer.py
@@ -0,0 +1,18 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+
+# Adding a lemmatizer lookup table
+# Documentation: https://spacy.io/docs/usage/adding-languages#lemmatizer
+# Entries should be added in the following format:
+
+
+LOOKUP = {
+    "kaugnayan": "ugnay",
+    "sangkatauhan": "tao",
+    "kanayunan": "nayon",
+    "pandaigdigan": "daigdig",
+    "kasaysayan": "saysay",
+    "kabayanihan": "bayani",
+    "karuwagan": "duwag"
+}
diff --git a/spacy/lang/tl/lex_attrs.py b/spacy/lang/tl/lex_attrs.py
new file mode 100644
index 000000000..ba396b48e
--- /dev/null
+++ b/spacy/lang/tl/lex_attrs.py
@@ -0,0 +1,43 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+# import the symbols for the attrs you want to overwrite
+from ...attrs import LIKE_NUM
+
+
+# Overwriting functions for lexical attributes
+# Documentation: https://spacy.io/docs/usage/adding-languages#lex-attrs
+# Most of these functions, like is_lower or like_url, should be language-
+# independent. Others, like like_num (which includes both digits and number
+# words), require customisation.
+
+
+# Example: check if token resembles a number
+
+_num_words = ['sero', 'isa', 'dalawa', 'tatlo', 'apat', 'lima', 'anim', 'pito',
+              'walo', 'siyam', 'sampu', 'labing-isa', 'labindalawa', 'labintatlo', 'labing-apat',
+              'labinlima', 'labing-anim', 'labimpito', 'labing-walo', 'labinsiyam', 'dalawampu',
+              'tatlumpu', 'apatnapu', 'limampu', 'animnapu', 'pitumpu', 'walumpu', 'siyamnapu',
+              'daan', 'libo', 'milyon', 'bilyon', 'trilyon', 'quadrilyon',
+              'gajilyon', 'bazilyon']
+
+
+def like_num(text):
+    text = text.replace(',', '').replace('.', '')
+    if text.isdigit():
+        return True
+    if text.count('/') == 1:
+        num, denom = text.split('/')
+        if num.isdigit() and denom.isdigit():
+            return True
+    if text in _num_words:
+        return True
+    return False
+
+
+# Create dictionary of functions to overwrite. The default lex_attr_getters are
+# updated with this one, so only the functions defined here are overwritten.
+
+LEX_ATTRS = {
+    LIKE_NUM: like_num
+}
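
Reviewer note: the overwritten attribute can be exercised directly, since `like_num` is a pure function. A minimal sketch; the sample inputs are illustrative:

    from spacy.lang.tl.lex_attrs import like_num

    assert like_num('siyam')        # number word from _num_words
    assert like_num('10,000')       # separators are stripped before the digit check
    assert like_num('3/4')          # simple fractions are recognised
    assert not like_num('bayan')    # ordinary words are rejected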
diff --git a/spacy/lang/tl/stop_words.py b/spacy/lang/tl/stop_words.py
new file mode 100644
index 000000000..bfda89eb4
--- /dev/null
+++ b/spacy/lang/tl/stop_words.py
@@ -0,0 +1,162 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+
+# Add stop words
+# Documentation: https://spacy.io/docs/usage/adding-languages#stop-words
+# To improve readability, words should be ordered alphabetically and separated
+# by spaces and newlines. When adding stop words from an online source, always
+# include the link in a comment. Make sure to proofread and double-check the
+# words – lists available online are often known to contain mistakes.
+
+# data from https://github.com/stopwords-iso/stopwords-tl/blob/master/stopwords-tl.txt
+
+STOP_WORDS = set("""
+akin
+aking
+ako
+alin
+am
+amin
+aming
+ang
+ano
+anumang
+apat
+at
+atin
+ating
+ay
+bababa
+bago
+bakit
+bawat
+bilang
+dahil
+dalawa
+dapat
+din
+dito
+doon
+gagawin
+gayunman
+ginagawa
+ginawa
+ginawang
+gumawa
+gusto
+habang
+hanggang
+hindi
+huwag
+iba
+ibaba
+ibabaw
+ibig
+ikaw
+ilagay
+ilalim
+ilan
+inyong
+isa
+isang
+itaas
+ito
+iyo
+iyon
+iyong
+ka
+kahit
+kailangan
+kailanman
+kami
+kanila
+kanilang
+kanino
+kanya
+kanyang
+kapag
+kapwa
+karamihan
+katiyakan
+katulad
+kaya
+kaysa
+ko
+kong
+kulang
+kumuha
+kung
+laban
+lahat
+lamang
+likod
+lima
+maaari
+maaaring
+maging
+mahusay
+makita
+marami
+marapat
+masyado
+may
+mayroon
+mga
+minsan
+mismo
+mula
+muli
+na
+nabanggit
+naging
+nagkaroon
+nais
+nakita
+namin
+napaka
+narito
+nasaan
+ng
+ngayon
+ni
+nila
+nilang
+nito
+niya
+niyang
+noon
+o
+pa
+paano
+pababa
+paggawa
+pagitan
+pagkakaroon
+pagkatapos
+palabas
+pamamagitan
+panahon
+pangalawa
+para
+paraan
+pareho
+pataas
+pero
+pumunta
+pumupunta
+sa
+saan
+sabi
+sabihin
+sarili
+sila
+sino
+siya
+tatlo
+tayo
+tulad
+tungkol
+una
+walang
+""".split())
diff --git a/spacy/lang/tl/tag_map.py b/spacy/lang/tl/tag_map.py
new file mode 100644
index 000000000..38476c6f6
--- /dev/null
+++ b/spacy/lang/tl/tag_map.py
@@ -0,0 +1,36 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+from ...symbols import POS, ADV, NOUN, ADP, PRON, SCONJ, PROPN, DET, SYM, INTJ
+from ...symbols import PUNCT, NUM, AUX, X, CONJ, ADJ, VERB, PART, SPACE, CCONJ
+
+
+# Add a tag map
+# Documentation: https://spacy.io/docs/usage/adding-languages#tag-map
+# Universal Dependencies: http://universaldependencies.org/u/pos/all.html
+# The keys of the tag map should be strings in your tag set. The dictionary must
+# have an entry POS whose value is one of the Universal Dependencies tags.
+# Optionally, you can also include morphological features or other attributes.
+
+
+TAG_MAP = {
+    "ADV": {POS: ADV},
+    "NOUN": {POS: NOUN},
+    "ADP": {POS: ADP},
+    "PRON": {POS: PRON},
+    "SCONJ": {POS: SCONJ},
+    "PROPN": {POS: PROPN},
+    "DET": {POS: DET},
+    "SYM": {POS: SYM},
+    "INTJ": {POS: INTJ},
+    "PUNCT": {POS: PUNCT},
+    "NUM": {POS: NUM},
+    "AUX": {POS: AUX},
+    "X": {POS: X},
+    "CONJ": {POS: CONJ},
+    "CCONJ": {POS: CCONJ},
+    "ADJ": {POS: ADJ},
+    "VERB": {POS: VERB},
+    "PART": {POS: PART},
+    "SP": {POS: SPACE}
+}
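
Reviewer note: a quick spot check for the stop list and the tag map. A minimal sketch; `is_stop` is the built-in lexical attribute driven by `Defaults.stop_words`, and the sample tokens are illustrative:

    from spacy.lang.tl import Tagalog
    from spacy.lang.tl.tag_map import TAG_MAP
    from spacy.symbols import POS, NOUN

    nlp = Tagalog()
    doc = nlp("hindi ito tungkol sa kanila")
    print([(token.text, token.is_stop) for token in doc])  # all True here

    # each entry maps a tag string to a Universal Dependencies POS symbol
    assert TAG_MAP["NOUN"][POS] == NOUN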
diff --git a/spacy/lang/tl/tokenizer_exceptions.py b/spacy/lang/tl/tokenizer_exceptions.py
new file mode 100644
index 000000000..1df8d6796
--- /dev/null
+++ b/spacy/lang/tl/tokenizer_exceptions.py
@@ -0,0 +1,48 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+# import symbols – if you need to use more, add them here
+from ...symbols import ORTH, LEMMA
+
+
+# Add tokenizer exceptions
+# Documentation: https://spacy.io/docs/usage/adding-languages#tokenizer-exceptions
+# Feel free to use custom logic to generate repetitive exceptions more efficiently.
+# If an exception is split into more than one token, the ORTH values combined always
+# need to match the original string.
+
+# Exceptions should be added in the following format:
+
+_exc = {
+    "tayo'y": [
+        {ORTH: "tayo", LEMMA: "tayo"},
+        {ORTH: "'y", LEMMA: "ay"}],
+    "isa'y": [
+        {ORTH: "isa", LEMMA: "isa"},
+        {ORTH: "'y", LEMMA: "ay"}],
+    "baya'y": [
+        {ORTH: "baya", LEMMA: "bayan"},
+        {ORTH: "'y", LEMMA: "ay"}],
+    "sa'yo": [
+        {ORTH: "sa", LEMMA: "sa"},
+        {ORTH: "'yo", LEMMA: "iyo"}],
+    "ano'ng": [
+        {ORTH: "ano", LEMMA: "ano"},
+        {ORTH: "'ng", LEMMA: "ang"}],
+    "siya'y": [
+        {ORTH: "siya", LEMMA: "siya"},
+        {ORTH: "'y", LEMMA: "ay"}],
+    "nawa'y": [
+        {ORTH: "nawa", LEMMA: "nawa"},
+        {ORTH: "'y", LEMMA: "ay"}],
+    "papa'no": [
+        {ORTH: "papa'no", LEMMA: "papaano"}],
+    "'di": [
+        {ORTH: "'di", LEMMA: "hindi"}]
+}


+# To keep things clean and readable, it's recommended to only declare the
+# TOKENIZER_EXCEPTIONS at the bottom:
+
+TOKENIZER_EXCEPTIONS = _exc
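
Reviewer note: a minimal check that the contractions are split as intended. A sketch; exception strings are matched case-sensitively, so the lowercase forms from `_exc` are used, and the sample sentence is illustrative:

    from spacy.lang.tl import Tagalog

    nlp = Tagalog()
    doc = nlp("siya'y pumunta at nawa'y bumalik")
    print([token.text for token in doc])
    # expected: ['siya', "'y", 'pumunta', 'at', 'nawa', "'y", 'bumalik']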