From ada4712250a8e02b13a2bbf06f0e150348494a6d Mon Sep 17 00:00:00 2001
From: Felix Sonntag
Date: Sun, 19 Nov 2017 16:14:37 +0100
Subject: [PATCH 1/4] Add contributor agreement

---
 .github/contributors/fsonntag.md | 106 +++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)
 create mode 100644 .github/contributors/fsonntag.md

diff --git a/.github/contributors/fsonntag.md b/.github/contributors/fsonntag.md
new file mode 100644
index 000000000..0d84015bb
--- /dev/null
+++ b/.github/contributors/fsonntag.md
@@ -0,0 +1,106 @@
+# spaCy contributor agreement
+
+This spaCy Contributor Agreement (**"SCA"**) is based on the
+[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
+The SCA applies to any contribution that you make to any product or project
+managed by us (the **"project"**), and sets out the intellectual property rights
+you grant to us in the contributed materials. The term **"us"** shall mean
+[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term
+**"you"** shall mean the person or entity identified below.
+
+If you agree to be bound by these terms, fill in the information requested
+below and include the filled-in version with your first pull request, under the
+folder [`.github/contributors/`](/.github/contributors/). The name of the file
+should be your GitHub username, with the extension `.md`. For example, the user
+example_user would create the file `.github/contributors/example_user.md`.
+
+Read this agreement carefully before signing. These terms and conditions
+constitute a binding legal agreement.
+
+## Contributor Agreement
+
+1. The term "contribution" or "contributed materials" means any source code,
+object code, patch, tool, sample, graphic, specification, manual,
+documentation, or any other material posted or submitted by you to the project.
+
+2. With respect to any worldwide copyrights, or copyright applications and
+registrations, in your contribution:
+
+    * you hereby assign to us joint ownership, and to the extent that such
+    assignment is or becomes invalid, ineffective or unenforceable, you hereby
+    grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
+    royalty-free, unrestricted license to exercise all rights under those
+    copyrights. This includes, at our option, the right to sublicense these same
+    rights to third parties through multiple levels of sublicensees or other
+    licensing arrangements;
+
+    * you agree that each of us can do all things in relation to your
+    contribution as if each of us were the sole owners, and if one of us makes
+    a derivative work of your contribution, the one who makes the derivative
+    work (or has it made) will be the sole owner of that derivative work;
+
+    * you agree that you will not assert any moral rights in your contribution
+    against us, our licensees or transferees;
+
+    * you agree that we may register a copyright in your contribution and
+    exercise all ownership rights associated with it; and
+
+    * you agree that neither of us has any duty to consult with, obtain the
+    consent of, pay or render an accounting to the other for any use or
+    distribution of your contribution.
+
+3. With respect to any patents you own, or that you can license without payment
+to any third party, you hereby grant to us a perpetual, irrevocable,
+non-exclusive, worldwide, no-charge, royalty-free license to:
+
+    * make, have made, use, sell, offer to sell, import, and otherwise transfer
+    your contribution in whole or in part, alone or in combination with or
+    included in any product, work or materials arising out of the project to
+    which your contribution was submitted, and
+
+    * at our option, to sublicense these same rights to third parties through
+    multiple levels of sublicensees or other licensing arrangements.
+
+4. Except as set out above, you keep all right, title, and interest in your
+contribution. The rights that you grant to us under these terms are effective
+on the date you first submitted a contribution to us, even if your submission
+took place before the date you sign these terms.
+
+5. You covenant, represent, warrant and agree that:
+
+    * Each contribution that you submit is and shall be an original work of
+    authorship and you can legally grant the rights set out in this SCA;
+
+    * to the best of your knowledge, each contribution will not violate any
+    third party's copyrights, trademarks, patents, or other intellectual
+    property rights; and
+
+    * each contribution shall be in compliance with U.S. export control laws and
+    other applicable export and import laws. You agree to notify us if you
+    become aware of any circumstance which would make any of the foregoing
+    representations inaccurate in any respect. We may publicly disclose your
+    participation in the project, including the fact that you have signed the SCA.
+
+6. This SCA is governed by the laws of the State of California and applicable
+U.S. Federal law. Any choice of law rules will not apply.
+
+7. Please place an “x” on one of the applicable statements below. Please do NOT
+mark both statements:
+
+    * [ ] I am signing on behalf of myself as an individual and no other person
+    or entity, including my employer, has or will have rights with respect to my
+    contributions.
+
+    * [x] I am signing on behalf of my employer or a legal entity and I have the
+    actual authority to contractually bind that entity.
+
+## Contributor Details
+
+| Field                          | Entry                        |
+|------------------------------- | ---------------------------- |
+| Name                           | Felix Sonntag                |
+| Company name (if applicable)   | -                            |
+| Title or role (if applicable)  | Student                      |
+| Date                           | 2017-11-19                   |
+| GitHub username                | fsonntag                     |
+| Website (optional)             | http://github.com/fsonntag/  |

From 8be339230238282bd4df1d235c013f6916855a8d Mon Sep 17 00:00:00 2001
From: Felix Sonntag
Date: Sun, 19 Nov 2017 16:29:04 +0100
Subject: [PATCH 2/4] Added regression test for 1494

---
 spacy/tests/regression/test_issue1494.py | 27 ++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 spacy/tests/regression/test_issue1494.py

diff --git a/spacy/tests/regression/test_issue1494.py b/spacy/tests/regression/test_issue1494.py
new file mode 100644
index 000000000..841d2a11d
--- /dev/null
+++ b/spacy/tests/regression/test_issue1494.py
@@ -0,0 +1,27 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+import pytest
+import re
+
+from ...lang.en import English
+from ...tokenizer import Tokenizer
+
+
+def test_issue1494():
+    infix_re = re.compile(r'''[^a-z]''')
+    text_to_tokenize = 'token 123test'
+    expected_tokens = ['token', '1', '2', '3', 'test']
+
+    def my_tokenizer(nlp):
+        return Tokenizer(nlp.vocab,
+                         {},
+                         infix_finditer=infix_re.finditer
+                         )
+
+    nlp = English()
+
+    nlp.tokenizer = my_tokenizer(nlp)
+    tokenized_words = [token.text for token in nlp(text_to_tokenize)]
+    print(tokenized_words)
+    assert tokenized_words == expected_tokens

From 33b0f86de3a0c1f45ae4c2e7abdad4ac4ef316d1 Mon Sep 17 00:00:00 2001
From: Felix Sonntag
Date: Sun, 19 Nov 2017 15:14:40 +0100
Subject: [PATCH 3/4] Changed tokenizer to add infix when infix_start is offset

---
 spacy/tokenizer.pyx | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx
index 095fbf4ad..600c81fff 100644
--- a/spacy/tokenizer.pyx
+++ b/spacy/tokenizer.pyx
@@ -241,11 +241,10 @@ cdef class Tokenizer:
                     for match in matches:
                         infix_start = match.start()
                         infix_end = match.end()
-                        if infix_start == start:
-                            continue
 
-                        span = string[start:infix_start]
-                        tokens.push_back(self.vocab.get(tokens.mem, span), False)
+                        if infix_start != start:
+                            span = string[start:infix_start]
+                            tokens.push_back(self.vocab.get(tokens.mem, span), False)
 
                         if infix_start != infix_end:
                             # If infix_start != infix_end, it means the infix

From 724ae7dc55b9e627534043e1bdbd8ff85cfd18c2 Mon Sep 17 00:00:00 2001
From: Felix Sonntag
Date: Tue, 28 Nov 2017 17:17:12 +0100
Subject: [PATCH 4/4] Fixed issue of infix capturing prefixes

---
 spacy/tests/regression/test_issue1494.py | 22 +++++++++++++++++-----
 spacy/tokenizer.pyx                      |  4 ++++
 2 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/spacy/tests/regression/test_issue1494.py b/spacy/tests/regression/test_issue1494.py
index 841d2a11d..693e81e81 100644
--- a/spacy/tests/regression/test_issue1494.py
+++ b/spacy/tests/regression/test_issue1494.py
@@ -10,8 +10,14 @@ from ...tokenizer import Tokenizer
 
 def test_issue1494():
     infix_re = re.compile(r'''[^a-z]''')
-    text_to_tokenize = 'token 123test'
-    expected_tokens = ['token', '1', '2', '3', 'test']
+    text_to_tokenize1 = 'token 123test'
+    expected_tokens1 = ['token', '1', '2', '3', 'test']
+
+    text_to_tokenize2 = 'token 1test'
+    expected_tokens2 = ['token', '1test']
+
+    text_to_tokenize3 = 'hello...test'
+    expected_tokens3 = ['hello', '.', '.', '.', 'test']
 
     def my_tokenizer(nlp):
         return Tokenizer(nlp.vocab,
@@ -22,6 +28,12 @@ def test_issue1494():
     nlp = English()
 
     nlp.tokenizer = my_tokenizer(nlp)
-    tokenized_words = [token.text for token in nlp(text_to_tokenize)]
-    print(tokenized_words)
-    assert tokenized_words == expected_tokens
+
+    tokenized_words1 = [token.text for token in nlp(text_to_tokenize1)]
+    assert tokenized_words1 == expected_tokens1
+
+    tokenized_words2 = [token.text for token in nlp(text_to_tokenize2)]
+    assert tokenized_words2 == expected_tokens2
+
+    tokenized_words3 = [token.text for token in nlp(text_to_tokenize3)]
+    assert tokenized_words3 == expected_tokens3
diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx
index 600c81fff..98019e6ef 100644
--- a/spacy/tokenizer.pyx
+++ b/spacy/tokenizer.pyx
@@ -238,10 +238,14 @@ cdef class Tokenizer:
                     # let's say we have dyn-o-mite-dave - the regex finds the
                     # start and end positions of the hyphens
                     start = 0
+                    start_before_infixes = start
                     for match in matches:
                         infix_start = match.start()
                         infix_end = match.end()
 
+                        if infix_start == start_before_infixes:
+                            continue
+
                         if infix_start != start:
                             span = string[start:infix_start]
                             tokens.push_back(self.vocab.get(tokens.mem, span), False)
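
A minimal usage sketch (not part of the patch series) of the behaviour these patches target. It mirrors the custom tokenizer built in test_issue1494.py; the custom_tokenizer name is illustrative, and it assumes the same spaCy version the patches apply to:

    import re

    from spacy.lang.en import English
    from spacy.tokenizer import Tokenizer

    # Infix-only tokenizer: every non-lowercase character is treated as an
    # infix, mirroring the regex used in the regression test.
    infix_re = re.compile(r'[^a-z]')

    def custom_tokenizer(nlp):
        # Illustrative helper: no special-case rules, no prefix/suffix
        # handling, only an infix pattern.
        return Tokenizer(nlp.vocab, {}, infix_finditer=infix_re.finditer)

    nlp = English()
    nlp.tokenizer = custom_tokenizer(nlp)

    # The expectations below are the ones asserted in test_issue1494.py.

    # PATCH 3/4: consecutive infix matches are all emitted, so the digits in
    # '123test' each become their own token.
    assert [t.text for t in nlp('token 123test')] == ['token', '1', '2', '3', 'test']

    # PATCH 4/4: an infix match at the very start of a whitespace-separated
    # piece behaves like a prefix position and is not split off.
    assert [t.text for t in nlp('token 1test')] == ['token', '1test']

    # Repeated punctuation between words still splits into single-character tokens.
    assert [t.text for t in nlp('hello...test')] == ['hello', '.', '.', '.', 'test']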