diff --git a/.github/contributors/idoshr.md b/.github/contributors/idoshr.md new file mode 100644 index 000000000..26e901530 --- /dev/null +++ b/.github/contributors/idoshr.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. 
This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made) will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. 
You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Ido Shraga | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 20-09-2020 | +| GitHub username | idoshr | +| Website (optional) | | diff --git a/spacy/lang/en/lex_attrs.py b/spacy/lang/en/lex_attrs.py index bd60b057b..4f6988bd5 100644 --- a/spacy/lang/en/lex_attrs.py +++ b/spacy/lang/en/lex_attrs.py @@ -3,7 +3,6 @@ from __future__ import unicode_literals from ...attrs import LIKE_NUM - _num_words = [ "zero", "one", @@ -92,7 +91,7 @@ def like_num(text): num, denom = text.split("/") if num.isdigit() and denom.isdigit(): return True - + text_lower = text.lower() if text_lower in _num_words: return True diff --git a/spacy/lang/he/__init__.py b/spacy/lang/he/__init__.py index 411cdf107..922f61462 100644 --- a/spacy/lang/he/__init__.py +++ b/spacy/lang/he/__init__.py @@ -4,6 +4,7 @@ from __future__ import unicode_literals from .stop_words import STOP_WORDS from ..tokenizer_exceptions import BASE_EXCEPTIONS +from .lex_attrs import LEX_ATTRS from ...language import Language from ...attrs import LANG from ...util import update_exc @@ -11,6 +12,7 @@ from ...util import update_exc class HebrewDefaults(Language.Defaults): lex_attr_getters = dict(Language.Defaults.lex_attr_getters) + lex_attr_getters.update(LEX_ATTRS) lex_attr_getters[LANG] = lambda text: "he" tokenizer_exceptions = update_exc(BASE_EXCEPTIONS) stop_words = STOP_WORDS diff --git a/spacy/lang/he/lex_attrs.py b/spacy/lang/he/lex_attrs.py new file mode 100644 index 000000000..9eab93ae4 --- /dev/null +++ b/spacy/lang/he/lex_attrs.py @@ -0,0 +1,97 @@ +# coding: utf8 +from __future__ import unicode_literals + +from ...attrs import LIKE_NUM + +_num_words = [ + "אפס", + "אחד", + "אחת", + "שתיים", + "שתים", + "שניים", + "שנים", + "שלוש", + "שלושה", + "ארבע", + "ארבעה", + "חמש", + "חמישה", + "שש", + "שישה", + "שבע", + "שבעה", + "שמונה", + "תשע", + "תשעה", + "עשר", + "עשרה", + 
"אחד עשר", + "אחת עשרה", + "שנים עשר", + "שתים עשרה", + "שלושה עשר", + "שלוש עשרה", + "ארבעה עשר", + "ארבע עשרה", + "חמישה עשר", + "חמש עשרה", + "ששה עשר", + "שש עשרה", + "שבעה עשר", + "שבע עשרה", + "שמונה עשר", + "שמונה עשרה", + "תשעה עשר", + "תשע עשרה", + "עשרים", + "שלושים", + "ארבעים", + "חמישים", + "שישים", + "שבעים", + "שמונים", + "תשעים", + "מאה", + "אלף", + "מליון", + "מליארד", + "טריליון", +] + + +_ordinal_words = [ + "ראשון", + "שני", + "שלישי", + "רביעי", + "חמישי", + "שישי", + "שביעי", + "שמיני", + "תשיעי", + "עשירי", +] + +def like_num(text): + if text.startswith(("+", "-", "±", "~")): + text = text[1:] + text = text.replace(",", "").replace(".", "") + if text.isdigit(): + return True + + if text.count("/") == 1: + num, denom = text.split("/") + if num.isdigit() and denom.isdigit(): + return True + + if text in _num_words: + return True + + # Check ordinal number + if text in _ordinal_words: + return True + return False + + +LEX_ATTRS = {LIKE_NUM: like_num} diff --git a/spacy/lang/he/stop_words.py b/spacy/lang/he/stop_words.py index a01ec4246..d4ac5e846 100644 --- a/spacy/lang/he/stop_words.py +++ b/spacy/lang/he/stop_words.py @@ -43,7 +43,6 @@ STOP_WORDS = set( בין עם עד -נגר על אל מול @@ -62,7 +61,7 @@ STOP_WORDS = set( עליך עלינו עליכם -לעיכן +עליכן עליהם עליהן כל @@ -71,8 +70,8 @@ STOP_WORDS = set( כך ככה כזה +כזאת זה -זות אותי אותה אותם @@ -95,7 +94,7 @@ STOP_WORDS = set( איתכן יהיה תהיה -היתי +הייתי היתה היה להיות @@ -105,8 +104,6 @@ STOP_WORDS = set( עצמם עצמן עצמנו -עצמהם -עצמהן מי מה איפה @@ -157,6 +154,7 @@ STOP_WORDS = set( לאו אי כלל +בעד נגד אם עם @@ -200,7 +198,6 @@ STOP_WORDS = set( אשר ואילו למרות -אס כמו כפי אז @@ -208,8 +205,8 @@ STOP_WORDS = set( כן לכן לפיכך -מאד עז +מאוד מעט מעטים במידה diff --git a/spacy/lang/lex_attrs.py b/spacy/lang/lex_attrs.py index c9cd82d7b..254f8706d 100644 --- a/spacy/lang/lex_attrs.py +++ b/spacy/lang/lex_attrs.py @@ -10,7 +10,7 @@ from .. 
import attrs _like_email = re.compile(r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)").match _tlds = set( "com|org|edu|gov|net|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|" - "name|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|" + "name|pro|tel|travel|xyz|icu|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|" "ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|" "cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|" "ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|" diff --git a/spacy/lang/tokenizer_exceptions.py b/spacy/lang/tokenizer_exceptions.py index 67349916b..13140a230 100644 --- a/spacy/lang/tokenizer_exceptions.py +++ b/spacy/lang/tokenizer_exceptions.py @@ -133,6 +133,8 @@ emoticons = set( :-] [: [-: +[= +=] :o) (o: :} @@ -164,6 +166,8 @@ emoticons = set( =| :| :-| +]= +=[ :1 :P :-P diff --git a/spacy/tests/lang/he/test_tokenizer.py b/spacy/tests/lang/he/test_tokenizer.py index f138ec6e7..67ad964d8 100644 --- a/spacy/tests/lang/he/test_tokenizer.py +++ b/spacy/tests/lang/he/test_tokenizer.py @@ -1,5 +1,6 @@ # encoding: utf8 from __future__ import unicode_literals +from spacy.lang.he.lex_attrs import like_num import pytest @@ -42,3 +43,41 @@ def test_he_tokenizer_handles_abbreviation(he_tokenizer, text, expected_tokens): def test_he_tokenizer_handles_punct(he_tokenizer, text, expected_tokens): tokens = he_tokenizer(text) assert expected_tokens == [token.text for token in tokens] + + + +@pytest.mark.parametrize( + "text,match", + [ + ("10", True), + ("1", True), + ("10,000", True), + ("10,00", True), + ("999.0", True), + ("אחד", True), + ("שתיים", True), + ("מליון", True), + ("כלב", False), + (",", False), + ("1/2", True), + ], +) +def test_lex_attrs_like_number(he_tokenizer, text, match): + tokens = he_tokenizer(text) + assert len(tokens) == 1 + assert tokens[0].like_num == match + + +@pytest.mark.parametrize( + "word", + [ + "שלישי", + "מליון", + "עשירי", 
+ "מאה", + "עשר", + "אחד עשר", + ] +) +def test_he_lex_attrs_like_number_for_ordinal(word): + assert like_num(word)