From f96af8526a927d2b18f70045d623fe0f3e2ec79d Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sat, 11 May 2019 23:03:56 +0200 Subject: [PATCH 01/31] Merge branch 'spacy.io' [ci skip] --- website/docs/api/top-level.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index e687cefa8..9d5bdc527 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -351,7 +351,7 @@ the two-letter language code. | `name` | unicode | Two-letter language code, e.g. `'en'`. | | `cls` | `Language` | The language class, e.g. `English`. | -### util.lang_class_is_loaded (#util.lang_class_is_loaded tag="function" new="2.1") +### util.lang_class_is_loaded {#util.lang_class_is_loaded tag="function" new="2.1"} Check whether a `Language` class is already loaded. `Language` classes are loaded lazily, to avoid expensive setup code associated with the language data. From 8baff1c7c0418b26bf690756e1cac81ecf655816 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sun, 12 May 2019 00:53:11 +0200 Subject: [PATCH 02/31] =?UTF-8?q?=F0=9F=92=AB=20Improve=20introspection=20?= =?UTF-8?q?of=20custom=20extension=20attributes=20(#3729)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add custom __dir__ to Underscore (see #3707) * Make sure custom extension methods keep their docstrings (see #3707) * Improve tests * Prepend note on partial to docstring (see #3707) * Remove print statement * Handle cases where docstring is None --- spacy/tests/doc/test_underscore.py | 25 +++++++++++++++++++++++++ spacy/tokens/underscore.py | 16 +++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/spacy/tests/doc/test_underscore.py b/spacy/tests/doc/test_underscore.py index 8f47157fa..2877bfeea 100644 --- a/spacy/tests/doc/test_underscore.py +++ b/spacy/tests/doc/test_underscore.py @@ -140,3 +140,28 @@ def test_underscore_mutable_defaults_dict(en_vocab): 
assert len(token1._.mutable) == 2 assert token1._.mutable["x"] == ["y"] assert len(token2._.mutable) == 0 + + +def test_underscore_dir(en_vocab): + """Test that dir() correctly returns extension attributes. This enables + things like tab-completion for the attributes in doc._.""" + Doc.set_extension("test_dir", default=None) + doc = Doc(en_vocab, words=["hello", "world"]) + assert "_" in dir(doc) + assert "test_dir" in dir(doc._) + assert "test_dir" not in dir(doc[0]._) + assert "test_dir" not in dir(doc[0:2]._) + + +def test_underscore_docstring(en_vocab): + """Test that docstrings are available for extension methods, even though + they're partials.""" + + def test_method(doc, arg1=1, arg2=2): + """I am a docstring""" + return (arg1, arg2) + + Doc.set_extension("test_docstrings", method=test_method) + doc = Doc(en_vocab, words=["hello", "world"]) + assert test_method.__doc__ == "I am a docstring" + assert doc._.test_docstrings.__doc__.rsplit(". ")[-1] == "I am a docstring" diff --git a/spacy/tokens/underscore.py b/spacy/tokens/underscore.py index ef1d78717..b36fe9294 100644 --- a/spacy/tokens/underscore.py +++ b/spacy/tokens/underscore.py @@ -25,6 +25,11 @@ class Underscore(object): object.__setattr__(self, "_start", start) object.__setattr__(self, "_end", end) + def __dir__(self): + # Hack to enable autocomplete on custom extensions + extensions = list(self._extensions.keys()) + return ["set", "get", "has"] + extensions + def __getattr__(self, name): if name not in self._extensions: raise AttributeError(Errors.E046.format(name=name)) @@ -32,7 +37,16 @@ class Underscore(object): if getter is not None: return getter(self._obj) elif method is not None: - return functools.partial(method, self._obj) + method_partial = functools.partial(method, self._obj) + # Hack to port over docstrings of the original function + # See https://stackoverflow.com/q/27362727/6400719 + method_docstring = method.__doc__ or "" + method_docstring_prefix = ( + "This method is a partial 
function and its first argument " + "(the object it's called on) will be filled automatically. " + ) + method_partial.__doc__ = method_docstring_prefix + method_docstring + return method_partial else: key = self._get_key(name) if key in self._doc.user_data: From ed18a6efbd0aed54be103921ceedd15157722cb7 Mon Sep 17 00:00:00 2001 From: BreakBB <33514570+BreakBB@users.noreply.github.com> Date: Tue, 14 May 2019 16:59:31 +0200 Subject: [PATCH 03/31] Add check for callable to 'Language.replace_pipe' to fix #3737 (#3741) --- spacy/errors.py | 2 ++ spacy/language.py | 5 +++++ spacy/tests/pipeline/test_pipe_methods.py | 6 ++++-- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index 5f964114e..b28393156 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -383,6 +383,8 @@ class Errors(object): E133 = ("The sum of prior probabilities for alias '{alias}' should not exceed 1, " "but found {sum}.") E134 = ("Alias '{alias}' defined for unknown entity '{entity}'.") + E135 = ("If you meant to replace a built-in component, use `create_pipe`: " + "`nlp.replace_pipe('{name}', nlp.create_pipe('{name}'))`") @add_codes diff --git a/spacy/language.py b/spacy/language.py index 6bd21b0bc..924c0b423 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -333,6 +333,11 @@ class Language(object): """ if name not in self.pipe_names: raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names)) + if not hasattr(component, "__call__"): + msg = Errors.E003.format(component=repr(component), name=name) + if isinstance(component, basestring_) and component in self.factories: + msg += Errors.E135.format(name=name) + raise ValueError(msg) self.pipeline[self.pipe_names.index(name)] = (name, component) def rename_pipe(self, old_name, new_name): diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py index d36201718..a0870784c 100644 --- a/spacy/tests/pipeline/test_pipe_methods.py +++ 
b/spacy/tests/pipeline/test_pipe_methods.py @@ -52,11 +52,13 @@ def test_get_pipe(nlp, name): assert nlp.get_pipe(name) == new_pipe -@pytest.mark.parametrize("name,replacement", [("my_component", lambda doc: doc)]) -def test_replace_pipe(nlp, name, replacement): +@pytest.mark.parametrize("name,replacement,not_callable", [("my_component", lambda doc: doc, {})]) +def test_replace_pipe(nlp, name, replacement, not_callable): with pytest.raises(ValueError): nlp.replace_pipe(name, new_pipe) nlp.add_pipe(new_pipe, name=name) + with pytest.raises(ValueError): + nlp.replace_pipe(name, not_callable) nlp.replace_pipe(name, replacement) assert nlp.get_pipe(name) != new_pipe assert nlp.get_pipe(name) == replacement From 321c9f5acc7dccf329dcd827955e833556ca4065 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 16 May 2019 23:15:58 +0200 Subject: [PATCH 04/31] Fix lex_id docs (closes #3743) --- website/docs/api/lexeme.md | 1 - website/docs/api/token.md | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/website/docs/api/lexeme.md b/website/docs/api/lexeme.md index 5ec2aaf0c..018dc72d8 100644 --- a/website/docs/api/lexeme.md +++ b/website/docs/api/lexeme.md @@ -128,7 +128,6 @@ The L2 norm of the lexeme's vector representation. | `text` | unicode | Verbatim text content. | | `orth` | int | ID of the verbatim text content. | | `orth_` | unicode | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. | -| `lex_id` | int | ID of the lexeme's lexical type. | | `rank` | int | Sequential ID of the lexemes's lexical type, used to index into tables, e.g. for word vectors. | | `flags` | int | Container of the lexeme's binary flags. | | `norm` | int | The lexemes's norm, i.e. a normalized form of the lexeme text. 
| diff --git a/website/docs/api/token.md b/website/docs/api/token.md index 2085a02c6..356cffb59 100644 --- a/website/docs/api/token.md +++ b/website/docs/api/token.md @@ -468,7 +468,7 @@ The L2 norm of the token's vector representation. | `prob` | float | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). | | `idx` | int | The character offset of the token within the parent document. | | `sentiment` | float | A scalar value indicating the positivity or negativity of the token. | -| `lex_id` | int | Sequential ID of the token's lexical type. | +| `lex_id` | int | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. | | `rank` | int | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. | | `cluster` | int | Brown cluster ID. | | `_` | `Underscore` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). | From 4d550a3055587cd8a440e6dcd6d52b351efb6103 Mon Sep 17 00:00:00 2001 From: Ujwal Narayan <31547494+ujwal-narayan@users.noreply.github.com> Date: Mon, 20 May 2019 16:26:10 +0530 Subject: [PATCH 05/31] Enhancing Kannada language Resources (#3755) * Updated stop_words.py Added more stopwords * Create ujwal-narayan.md Enhancing Kannada language resources --- .github/contributors/ujwal-narayan.md | 106 +++++++++++++++++++++ spacy/lang/kn/stop_words.py | 128 +++++++++++++++----------- 2 files changed, 180 insertions(+), 54 deletions(-) create mode 100644 .github/contributors/ujwal-narayan.md diff --git a/.github/contributors/ujwal-narayan.md b/.github/contributors/ujwal-narayan.md new file mode 100644 index 000000000..622bb5da4 --- /dev/null +++ b/.github/contributors/ujwal-narayan.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). 
+The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. 
This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. 
You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Ujwal Narayan | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 17/05/2019 | +| GitHub username | ujwal-narayan | +| Website (optional) | | diff --git a/spacy/lang/kn/stop_words.py b/spacy/lang/kn/stop_words.py index 583a42cc1..652341e73 100644 --- a/spacy/lang/kn/stop_words.py +++ b/spacy/lang/kn/stop_words.py @@ -4,67 +4,87 @@ from __future__ import unicode_literals STOP_WORDS = set( """ -ಈ -ಮತ್ತು -ಹಾಗೂ -ಅವರು -ಅವರ -ಬಗ್ಗೆ -ಎಂಬ -ಆದರೆ -ಅವರನ್ನು -ಆದರೆ -ತಮ್ಮ -ಒಂದು -ಎಂದರು -ಮೇಲೆ -ಹೇಳಿದರು -ಸೇರಿದಂತೆ -ಬಳಿಕ -ಆ -ಯಾವುದೇ -ಅವರಿಗೆ -ನಡೆದ -ಕುರಿತು -ಇದು -ಅವರು -ಕಳೆದ -ಇದೇ -ತಿಳಿಸಿದರು -ಹೀಗಾಗಿ -ಕೂಡ -ತನ್ನ -ತಿಳಿಸಿದ್ದಾರೆ -ನಾನು -ಹೇಳಿದ್ದಾರೆ -ಈಗ -ಎಲ್ಲ -ನನ್ನ -ನಮ್ಮ -ಈಗಾಗಲೇ -ಇದಕ್ಕೆ ಹಲವು -ಇದೆ -ಮತ್ತೆ -ಮಾಡುವ -ನೀಡಿದರು -ನಾವು -ನೀಡಿದ -ಇದರಿಂದ +ಮೂಲಕ +ಹಾಗೂ ಅದು -ಇದನ್ನು ನೀಡಿದ್ದಾರೆ +ಯಾವ +ಎಂದರು +ಅವರು +ಈಗ +ಎಂಬ +ಹಾಗಾಗಿ +ಅಷ್ಟೇ +ನಾವು +ಇದೇ +ಹೇಳಿ +ತಮ್ಮ +ಹೀಗೆ +ನಮ್ಮ +ಬೇರೆ +ನೀಡಿದರು +ಮತ್ತೆ +ಇದು +ಈ +ನೀವು +ನಾನು +ಇತ್ತು +ಎಲ್ಲಾ +ಯಾವುದೇ +ನಡೆದ ಅದನ್ನು -ಇಲ್ಲಿ -ಆಗ -ಬಂದಿದೆ. 
-ಅದೇ -ಇರುವ -ಅಲ್ಲದೆ -ಕೆಲವು +ಎಂದರೆ ನೀಡಿದೆ +ಹೀಗಾಗಿ +ಜೊತೆಗೆ +ಇದರಿಂದ +ನನಗೆ +ಅಲ್ಲದೆ +ಎಷ್ಟು ಇದರ +ಇಲ್ಲ +ಕಳೆದ +ತುಂಬಾ +ಈಗಾಗಲೇ +ಮಾಡಿ +ಅದಕ್ಕೆ +ಬಗ್ಗೆ +ಅವರ +ಇದನ್ನು +ಆ +ಇದೆ +ಹೆಚ್ಚು ಇನ್ನು +ಎಲ್ಲ +ಇರುವ +ಅವರಿಗೆ +ನಿಮ್ಮ +ಏನು +ಕೂಡ +ಇಲ್ಲಿ +ನನ್ನನ್ನು +ಕೆಲವು +ಮಾತ್ರ +ಬಳಿಕ +ಅಂತ +ತನ್ನ +ಆಗ +ಅಥವಾ +ಅಲ್ಲ +ಕೇವಲ +ಆದರೆ +ಮತ್ತು +ಇನ್ನೂ +ಅದೇ +ಆಗಿ +ಅವರನ್ನು +ಹೇಳಿದ್ದಾರೆ ನಡೆದಿದೆ +ಇದಕ್ಕೆ +ಎಂಬುದು +ಎಂದು +ನನ್ನ +ಮೇಲೆ """.split() ) From b78a8dc1d227ed81aa6feb3a89bdce46428a50b9 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 24 May 2019 14:06:04 +0200 Subject: [PATCH 06/31] Update Scorer and add API docs --- spacy/scorer.py | 43 ++++++++++++++++++++++++---- website/docs/api/scorer.md | 58 ++++++++++++++++++++++++++++++++++++++ website/meta/sidebars.json | 3 +- 3 files changed, 98 insertions(+), 6 deletions(-) create mode 100644 website/docs/api/scorer.md diff --git a/spacy/scorer.py b/spacy/scorer.py index 2f49d7d69..32716b852 100644 --- a/spacy/scorer.py +++ b/spacy/scorer.py @@ -35,7 +35,17 @@ class PRFScore(object): class Scorer(object): + """Compute evaluation scores.""" + def __init__(self, eval_punct=False): + """Initialize the Scorer. + + eval_punct (bool): Evaluate the dependency attachments to and from + punctuation. + RETURNS (Scorer): The newly created object. + + DOCS: https://spacy.io/api/scorer#init + """ self.tokens = PRFScore() self.sbd = PRFScore() self.unlabelled = PRFScore() @@ -46,34 +56,46 @@ class Scorer(object): @property def tags_acc(self): + """RETURNS (float): Part-of-speech tag accuracy (fine grained tags, + i.e. `Token.tag`). 
+ """ return self.tags.fscore * 100 @property def token_acc(self): + """RETURNS (float): Tokenization accuracy.""" return self.tokens.precision * 100 @property def uas(self): + """RETURNS (float): Unlabelled dependency score.""" return self.unlabelled.fscore * 100 @property def las(self): + """RETURNS (float): Labelled depdendency score.""" return self.labelled.fscore * 100 @property def ents_p(self): + """RETURNS (float): Named entity accuracy (precision).""" return self.ner.precision * 100 @property def ents_r(self): + """RETURNS (float): Named entity accuracy (recall).""" return self.ner.recall * 100 @property def ents_f(self): + """RETURNS (float): Named entity accuracy (F-score).""" return self.ner.fscore * 100 @property def scores(self): + """RETURNS (dict): All scores with keys `uas`, `las`, `ents_p`, + `ents_r`, `ents_f`, `tags_acc` and `token_acc`. + """ return { "uas": self.uas, "las": self.las, @@ -84,9 +106,20 @@ class Scorer(object): "token_acc": self.token_acc, } - def score(self, tokens, gold, verbose=False, punct_labels=("p", "punct")): - if len(tokens) != len(gold): - gold = GoldParse.from_annot_tuples(tokens, zip(*gold.orig_annot)) + def score(self, doc, gold, verbose=False, punct_labels=("p", "punct")): + """Update the evaluation scores from a single Doc / GoldParse pair. + + doc (Doc): The predicted annotations. + gold (GoldParse): The correct annotations. + verbose (bool): Print debugging information. + punct_labels (tuple): Dependency labels for punctuation. Used to + evaluate dependency attachments to punctuation if `eval_punct` is + `True`. 
+ + DOCS: https://spacy.io/api/scorer#score + """ + if len(doc) != len(gold): + gold = GoldParse.from_annot_tuples(doc, zip(*gold.orig_annot)) gold_deps = set() gold_tags = set() gold_ents = set(tags_to_entities([annot[-1] for annot in gold.orig_annot])) @@ -96,7 +129,7 @@ class Scorer(object): gold_deps.add((id_, head, dep.lower())) cand_deps = set() cand_tags = set() - for token in tokens: + for token in doc: if token.orth_.isspace(): continue gold_i = gold.cand_to_gold[token.i] @@ -116,7 +149,7 @@ class Scorer(object): cand_deps.add((gold_i, gold_head, token.dep_.lower())) if "-" not in [token[-1] for token in gold.orig_annot]: cand_ents = set() - for ent in tokens.ents: + for ent in doc.ents: first = gold.cand_to_gold[ent.start] last = gold.cand_to_gold[ent.end - 1] if first is None or last is None: diff --git a/website/docs/api/scorer.md b/website/docs/api/scorer.md new file mode 100644 index 000000000..e6a8595fd --- /dev/null +++ b/website/docs/api/scorer.md @@ -0,0 +1,58 @@ +--- +title: Scorer +teaser: Compute evaluation scores +tag: class +source: spacy/scorer.py +--- + +The `Scorer` computes and stores evaluation scores. It's typically created by +[`Language.evaluate`](/api/language#evaluate). + +## Scorer.\_\_init\_\_ {#init tag="method"} + +Create a new `Scorer`. + +> #### Example +> +> ```python +> from spacy.scorer import Scorer +> +> scorer = Scorer() +> ``` + +| Name | Type | Description | +| ------------ | -------- | ------------------------------------------------------------ | +| `eval_punct` | bool | Evaluate the dependency attachments to and from punctuation. | +| **RETURNS** | `Scorer` | The newly created object. | + +## Scorer.score {#score tag="method"} + +Update the evaluation scores from a single [`Doc`](/api/doc) / +[`GoldParse`](/api/goldparse) pair. 
+ +> #### Example +> +> ```python +> scorer = Scorer() +> scorer.score(doc, gold) +> ``` + +| Name | Type | Description | +| -------------- | ----------- | -------------------------------------------------------------------------------------------------------------------- | +| `doc` | `Doc` | The predicted annotations. | +| `gold` | `GoldParse` | The correct annotations. | +| `verbose` | bool | Print debugging information. | +| `punct_labels` | tuple | Dependency labels for punctuation. Used to evaluate dependency attachments to punctuation if `eval_punct` is `True`. | + +## Properties + +| Name | Type | Description | +| ----------- | ----- | -------------------------------------------------------------------------------------------- | +| `token_acc` | float | Tokenization accuracy. | +| `tags_acc` | float | Part-of-speech tag accuracy (fine grained tags, i.e. `Token.tag`). | +| `uas` | float | Unlabelled dependency score. | +| `las` | float | Labelled dependency score. | +| `ents_p` | float | Named entity accuracy (precision). | +| `ents_r` | float | Named entity accuracy (recall). | +| `ents_f` | float | Named entity accuracy (F-score). | +| `scores` | dict | All scores with keys `uas`, `las`, `ents_p`, `ents_r`, `ents_f`, `tags_acc` and `token_acc`. 
| diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json index bc8a70ea0..31083b091 100644 --- a/website/meta/sidebars.json +++ b/website/meta/sidebars.json @@ -90,7 +90,8 @@ { "text": "StringStore", "url": "/api/stringstore" }, { "text": "Vectors", "url": "/api/vectors" }, { "text": "GoldParse", "url": "/api/goldparse" }, - { "text": "GoldCorpus", "url": "/api/goldcorpus" } + { "text": "GoldCorpus", "url": "/api/goldcorpus" }, + { "text": "Scorer", "url": "/api/scorer" } ] }, { From 45e68555505b6171c09de1a4cdfbb4d0e9b84e42 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 24 May 2019 14:06:26 +0200 Subject: [PATCH 07/31] Update Language.update docs --- spacy/language.py | 4 +++- website/docs/api/language.md | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/spacy/language.py b/spacy/language.py index 924c0b423..dab60a421 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -417,7 +417,9 @@ class Language(object): golds (iterable): A batch of `GoldParse` objects. drop (float): The droput rate. sgd (callable): An optimizer. - RETURNS (dict): Results from the update. + losses (dict): Dictionary to update with the loss, keyed by component. + component_cfg (dict): Config parameters for specific pipeline + components, keyed by component name. DOCS: https://spacy.io/api/language#update """ diff --git a/website/docs/api/language.md b/website/docs/api/language.md index 232a9020c..47d747775 100644 --- a/website/docs/api/language.md +++ b/website/docs/api/language.md @@ -119,6 +119,7 @@ Update the models in the pipeline. | `golds` | iterable | A batch of `GoldParse` objects or dictionaries. Dictionaries will be used to create [`GoldParse`](/api/goldparse) objects. For the available keys and their usage, see [`GoldParse.__init__`](/api/goldparse#init). | | `drop` | float | The dropout rate. | | `sgd` | callable | An optimizer. | +| `losses` | dict | Dictionary to update with the loss, keyed by pipeline component. 
| | `component_cfg` 2.1 | dict | Config parameters for specific pipeline components, keyed by component name. | ## Language.begin_training {#begin_training tag="method"} From 7634812172872090fbf23d2b9adb9478fbd97770 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 24 May 2019 14:06:36 +0200 Subject: [PATCH 08/31] Document Language.evaluate --- spacy/language.py | 13 +++++++++++++ website/docs/api/language.md | 19 +++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/spacy/language.py b/spacy/language.py index dab60a421..39d95c689 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -600,6 +600,19 @@ class Language(object): def evaluate( self, docs_golds, verbose=False, batch_size=256, scorer=None, component_cfg=None ): + """Evaluate a model's pipeline components. + + docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects. + verbose (bool): Print debugging information. + batch_size (int): Batch size to use. + scorer (Scorer): Optional `Scorer` to use. If not passed in, a new one + will be created. + component_cfg (dict): An optional dictionary with extra keyword + arguments for specific components. + RETURNS (Scorer): The scorer containing the evaluation results. + + DOCS: https://spacy.io/api/language#evaluate + """ if scorer is None: scorer = Scorer() if component_cfg is None: diff --git a/website/docs/api/language.md b/website/docs/api/language.md index 47d747775..3245a165b 100644 --- a/website/docs/api/language.md +++ b/website/docs/api/language.md @@ -122,6 +122,25 @@ Update the models in the pipeline. | `losses` | dict | Dictionary to update with the loss, keyed by pipeline component. | | `component_cfg` 2.1 | dict | Config parameters for specific pipeline components, keyed by component name. | +## Language.evaluate {#evaluate tag="method"} + +Evaluate a model's pipeline components. 
+ +> #### Example +> +> ```python +> scorer = nlp.evaluate(docs_golds, verbose=True) +> print(scorer.scores) +> ``` + +| Name | Type | Description | +| -------------------------------------------- | -------- | ------------------------------------------------------------------------------------- | +| `docs_golds` | iterable | Tuples of `Doc` and `GoldParse` objects. | +| `verbose` | bool | Print debugging information. | +| `batch_size` | int | The batch size to use. | +| `scorer` | `Scorer` | Optional [`Scorer`](/api/scorer) to use. If not passed in, a new one will be created. | +| `component_cfg` 2.1 | dict | Config parameters for specific pipeline components, keyed by component name. | + ## Language.begin_training {#begin_training tag="method"} Allocate models, pre-process training data and acquire an optimizer. From 604acb6ace9991f3be457a11de4f8ffa40f06450 Mon Sep 17 00:00:00 2001 From: estr4ng7d Date: Fri, 24 May 2019 05:29:42 -0700 Subject: [PATCH 09/31] Marathi Language Support (#3767) * Adding Marathi language details and folder to it * Adding few changes and running tests * Adding few changes and running tests * Update __init__.py mh -> mr * Rename spacy/lang/mh/__init__.py to spacy/lang/mr/__init__.py * mh -> mr --- .github/contributors/estr4ng7d.md | 106 ++++++++++++++++ spacy/lang/mr/__init__.py | 20 +++ spacy/lang/mr/stop_words.py | 196 ++++++++++++++++++++++++++++++ 3 files changed, 322 insertions(+) create mode 100644 .github/contributors/estr4ng7d.md create mode 100644 spacy/lang/mr/__init__.py create mode 100644 spacy/lang/mr/stop_words.py diff --git a/.github/contributors/estr4ng7d.md b/.github/contributors/estr4ng7d.md new file mode 100644 index 000000000..35c095c47 --- /dev/null +++ b/.github/contributors/estr4ng7d.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). 
+The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. 
This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. 
You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Amey Baviskar | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 21-May-2019 | +| GitHub username | estr4ng7d | +| Website (optional) | | diff --git a/spacy/lang/mr/__init__.py b/spacy/lang/mr/__init__.py new file mode 100644 index 000000000..538540935 --- /dev/null +++ b/spacy/lang/mr/__init__.py @@ -0,0 +1,20 @@ +#coding: utf8 +from __future__ import unicode_literals + +from .stop_words import STOP_WORDS +from ...language import Language +from ...attrs import LANG + + +class MarathiDefaults(Language.Defaults): + lex_attr_getters = dict(Language.Defaults.lex_attr_getters) + lex_attr_getters[LANG] = lambda text: "mr" + stop_words = STOP_WORDS + + +class Marathi(Language): + lang = "mr" + Defaults = MarathiDefaults + + +__all__ = ["Marathi"] diff --git a/spacy/lang/mr/stop_words.py b/spacy/lang/mr/stop_words.py new file mode 100644 index 000000000..0b0cd035d --- /dev/null +++ b/spacy/lang/mr/stop_words.py @@ -0,0 +1,196 @@ +# coding: utf8 +from __future__ import unicode_literals + + +# Source: https://github.com/stopwords-iso/stopwords-mr/blob/master/stopwords-mr.txt, https://github.com/6/stopwords-json/edit/master/dist/mr.json +STOP_WORDS = set( + """ +न +अतरी +तो +हें +तें +कां +आणि +जें +जे +मग +ते +मी +जो +परी +गा +हे +ऐसें +आतां +नाहीं +तेथ +हा +तया +असे +म्हणे +काय +कीं +जैसें +तंव +तूं +होय +जैसा +आहे +पैं +तैसा +जरी +म्हणोनि +एक +ऐसा +जी +ना +मज +एथ +या +जेथ +जया +तुज +तेणें +तैं +पां +असो +करी +ऐसी +येणें +जाहला +तेंचि +आघवें +होती +कांहीं +होऊनि +एकें +मातें +ठायीं +ये +सकळ +केलें +जेणें +जाण +जैसी +होये +जेवीं +एऱ्हवीं +मीचि +किरीटी +दिसे +देवा +हो +तरि +कीजे +तैसे +आपण +तिये +कर्म +नोहे +इये +पडे +माझें +तैसी +लागे +नाना +जंव +कीर +अधिक +अनेक +अशी +असलयाचे +असलेल्या +असा +असून +असे +आज +आणि +आता +आपल्या +आला +आली +आले +आहे +आहेत +एक +एका +कमी +करणयात +करून +का +काम +काय +काही +किवा +की +केला +केली +केले 
+कोटी +गेल्या +घेऊन +जात +झाला +झाली +झाले +झालेल्या +टा +तर +तरी +तसेच +ता +ती +तीन +ते +तो +त्या +त्याचा +त्याची +त्याच्या +त्याना +त्यानी +त्यामुळे +त्री +दिली +दोन +न +पण +पम +परयतन +पाटील +म +मात्र +माहिती +मी +मुबी +म्हणजे +म्हणाले +म्हणून +या +याचा +याची +याच्या +याना +यानी +येणार +येत +येथील +येथे +लाख +व +व्यकत +सर्व +सागित्ले +सुरू +हजार +हा +ही +हे +होणार +होत +होता +होती +होते +""".split() +) From ed7be3f64cb9208fee602128c7a28ded3b3677b0 Mon Sep 17 00:00:00 2001 From: Ujwal Narayan <31547494+ujwal-narayan@users.noreply.github.com> Date: Mon, 27 May 2019 15:22:52 +0530 Subject: [PATCH 10/31] Update norm_exceptions.py (#3778) * Update norm_exceptions.py Extended the Currency set to include Franc, Indian Rupee, Bangladeshi Taka, Korean Won, Mexican Dollar, and Egyptian Pound * Fix formatting [ci skip] --- spacy/lang/norm_exceptions.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/spacy/lang/norm_exceptions.py b/spacy/lang/norm_exceptions.py index 8766e2815..341967a78 100644 --- a/spacy/lang/norm_exceptions.py +++ b/spacy/lang/norm_exceptions.py @@ -53,5 +53,11 @@ BASE_NORMS = { "US$": "$", "C$": "$", "A$": "$", - "₺" : "$", + "₺": "$", + "₹": "$", + "৳": "$", + "₩": "$", + "Mex$": "$", + "₣": "$", + "E£": "$", } From a8416c46f74a6a9d159090ad6dbc644cd6d40e92 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Tue, 28 May 2019 17:11:39 +0200 Subject: [PATCH 11/31] Use string name in setup.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hopefully this will trick GitHub's parser into recognising it as a Python package and show us the dependents / "used by" statistics 🤞 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 2c05f8d70..c440b016f 100755 --- a/setup.py +++ b/setup.py @@ -209,7 +209,7 @@ def setup_package(): generate_cython(root, "spacy") setup( - name=about["__title__"], + name="spacy", zip_safe=False, packages=PACKAGES, 
package_data=PACKAGE_DATA, From 89379a7fa45f94bce4945284a7781eaaa7bc06ff Mon Sep 17 00:00:00 2001 From: mak <9056896+maknotavailable@users.noreply.github.com> Date: Wed, 29 May 2019 09:51:55 +0100 Subject: [PATCH 12/31] Corrected example model URL in requirements.txt (#3786) The URL used to show how to add a model to the requirements.txt had the old release path (excl. explosion). --- website/docs/usage/models.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/usage/models.md b/website/docs/usage/models.md index 1dde6f94b..5df4ab458 100644 --- a/website/docs/usage/models.md +++ b/website/docs/usage/models.md @@ -326,7 +326,7 @@ URLs. ```text ### requirements.txt spacy>=2.0.0,<3.0.0 -https://github.com/spacy-models/releases/download/en_core_web_sm-2.0.0/en_core_web_sm-2.0.0.tar.gz#egg=en_core_web_sm +https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.1.0/en_core_web_sm-2.1.0.tar.gz#egg=en_core_web_sm ``` Specifying `#egg=` with the package name tells pip which package to expect from From a7fd42d937d405088803da3fb3b301cc81cea719 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 30 May 2019 14:34:58 +0200 Subject: [PATCH 13/31] Make jsonschema dependency optional (#3784) --- requirements.txt | 3 ++- setup.py | 1 - spacy/errors.py | 2 ++ spacy/matcher/matcher.pyx | 5 ++++- spacy/util.py | 9 +++++++-- 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index 169fb37cd..42045a829 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,9 +9,10 @@ srsly>=0.0.5,<1.1.0 # Third party dependencies numpy>=1.15.0 requests>=2.13.0,<3.0.0 -jsonschema>=2.6.0,<3.1.0 plac<1.0.0,>=0.9.6 pathlib==1.0.1; python_version < "3.4" +# Optional dependencies +jsonschema>=2.6.0,<3.1.0 # Development dependencies cython>=0.25 pytest>=4.0.0,<4.1.0 diff --git a/setup.py b/setup.py index c440b016f..0c2b541bd 100755 --- a/setup.py +++ b/setup.py @@ -232,7 +232,6 @@ def setup_package(): 
"blis>=0.2.2,<0.3.0", "plac<1.0.0,>=0.9.6", "requests>=2.13.0,<3.0.0", - "jsonschema>=2.6.0,<3.1.0", "wasabi>=0.2.0,<1.1.0", "srsly>=0.0.5,<1.1.0", 'pathlib==1.0.1; python_version < "3.4"', diff --git a/spacy/errors.py b/spacy/errors.py index b28393156..f1d42adae 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -385,6 +385,8 @@ class Errors(object): E134 = ("Alias '{alias}' defined for unknown entity '{entity}'.") E135 = ("If you meant to replace a built-in component, use `create_pipe`: " "`nlp.replace_pipe('{name}', nlp.create_pipe('{name}'))`") + E136 = ("This additional feature requires the jsonschema library to be " + "installed:\npip install jsonschema") @add_codes diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index b58c0e072..2dd8c2940 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -48,7 +48,10 @@ cdef class Matcher: self._extra_predicates = [] self.vocab = vocab self.mem = Pool() - self.validator = get_json_validator(TOKEN_PATTERN_SCHEMA) if validate else None + if validate: + self.validator = get_json_validator(TOKEN_PATTERN_SCHEMA) + else: + self.validator = None def __reduce__(self): data = (self.vocab, self._patterns, self._callbacks) diff --git a/spacy/util.py b/spacy/util.py index 475d556d0..1a40bb5ca 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -14,8 +14,11 @@ import functools import itertools import numpy.random import srsly -from jsonschema import Draft4Validator +try: + import jsonschema +except ImportError: + jsonschema = None try: import cupy.random @@ -682,7 +685,9 @@ def get_json_validator(schema): # validator that's used (e.g. different draft implementation), without # having to change it all across the codebase. 
# TODO: replace with (stable) Draft6Validator, if available - return Draft4Validator(schema) + if jsonschema is None: + raise ValueError(Errors.E136) + return jsonschema.Draft4Validator(schema) def validate_schema(schema): From 26c37c5a4dc8acf113e9b74f3162ed2a3bc74aea Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Fri, 31 May 2019 15:49:19 +0530 Subject: [PATCH 14/31] fix all references to BILUO annotation format (#3797) --- spacy/gold.pyx | 2 +- spacy/matcher/phrasematcher.pyx | 4 ++-- website/docs/api/annotation.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spacy/gold.pyx b/spacy/gold.pyx index e0ba26a04..569979a5f 100644 --- a/spacy/gold.pyx +++ b/spacy/gold.pyx @@ -532,7 +532,7 @@ cdef class GoldParse: self.labels[i] = deps[i2j_multi[i]] # Now set NER...This is annoying because if we've split # got an entity word split into two, we need to adjust the - # BILOU tags. We can't have BB or LL etc. + # BILUO tags. We can't have BB or LL etc. # Case 1: O -- easy. 
ner_tag = entities[i2j_multi[i]] if ner_tag == "O": diff --git a/spacy/matcher/phrasematcher.pyx b/spacy/matcher/phrasematcher.pyx index 22ce8831a..68821a085 100644 --- a/spacy/matcher/phrasematcher.pyx +++ b/spacy/matcher/phrasematcher.pyx @@ -127,7 +127,7 @@ cdef class PhraseMatcher: and self.attr not in (DEP, POS, TAG, LEMMA): string_attr = self.vocab.strings[self.attr] user_warning(Warnings.W012.format(key=key, attr=string_attr)) - tags = get_bilou(length) + tags = get_biluo(length) phrase_key = mem.alloc(length, sizeof(attr_t)) for i, tag in enumerate(tags): attr_value = self.get_lex_value(doc, i) @@ -230,7 +230,7 @@ cdef class PhraseMatcher: return "matcher:{}-{}".format(string_attr_name, string_attr_value) -def get_bilou(length): +def get_biluo(length): if length == 0: raise ValueError(Errors.E127) elif length == 1: diff --git a/website/docs/api/annotation.md b/website/docs/api/annotation.md index 366e15980..a5bb30b6f 100644 --- a/website/docs/api/annotation.md +++ b/website/docs/api/annotation.md @@ -510,7 +510,7 @@ described in any single publication. The model is a greedy transition-based parser guided by a linear model whose weights are learned using the averaged perceptron loss, via the [dynamic oracle](http://www.aclweb.org/anthology/C12-1059) imitation learning -strategy. The transition system is equivalent to the BILOU tagging scheme. +strategy. The transition system is equivalent to the BILUO tagging scheme. 
## Models and training data {#training} From 1f130057510a1aae33abf378fa5af7eae3e807a6 Mon Sep 17 00:00:00 2001 From: Nipun Sadvilkar Date: Fri, 31 May 2019 20:20:45 +0530 Subject: [PATCH 15/31] Incorrect Token attribute ent_iob_ description (#3800) * Incorrect Token attribute ent_iob_ description * Add spaCy contributor agreement --- .github/contributors/nipunsadvilkar.md | 106 +++++++++++++++++++++++++ website/docs/api/token.md | 2 +- 2 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 .github/contributors/nipunsadvilkar.md diff --git a/.github/contributors/nipunsadvilkar.md b/.github/contributors/nipunsadvilkar.md new file mode 100644 index 000000000..aa0ff462f --- /dev/null +++ b/.github/contributors/nipunsadvilkar.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. 
The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. 
With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. 
Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [x] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Nipun Sadvilkar | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 31st May, 2019 | +| GitHub username | nipunsadvilkar| +| Website (optional) |https://nipunsadvilkar.github.io/| diff --git a/website/docs/api/token.md b/website/docs/api/token.md index 356cffb59..592a9cca5 100644 --- a/website/docs/api/token.md +++ b/website/docs/api/token.md @@ -424,7 +424,7 @@ The L2 norm of the token's vector representation. | `ent_type` | int | Named entity type. | | `ent_type_` | unicode | Named entity type. | | `ent_iob` | int | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. | | -| `ent_iob_` | unicode | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. | +| `ent_iob_` | unicode | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. | | `ent_id` | int | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. | | `ent_id_` | unicode | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. | | `lemma` | int | Base form of the token, with no inflectional suffixes. 
| From 0c74506c9cb79c76ca06ef04a4d44a042e1b3f7c Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sat, 1 Jun 2019 11:35:01 +0200 Subject: [PATCH 16/31] Fix typos in docs (closes #3802) [ci skip] --- website/docs/usage/rule-based-matching.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/usage/rule-based-matching.md b/website/docs/usage/rule-based-matching.md index 13d3dcd32..ee901e3fd 100644 --- a/website/docs/usage/rule-based-matching.md +++ b/website/docs/usage/rule-based-matching.md @@ -405,7 +405,7 @@ class BadHTMLMerger(object): for match_id, start, end in matches: spans.append(doc[start:end]) with doc.retokenize() as retokenizer: - for span in hashtags: + for span in spans: retokenizer.merge(span) for token in span: token._.bad_html = True # Mark token as bad HTML @@ -679,7 +679,7 @@ for match_id, start, end in matches: if doc.vocab.strings[match_id] == "HASHTAG": hashtags.append(doc[start:end]) with doc.retokenize() as retokenizer: - for span in spans: + for span in hashtags: retokenizer.merge(span) for token in span: token._.is_hashtag = True From 09e78b52cfb8f3b8b3216a2c2b8d98e429b5512f Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sat, 1 Jun 2019 14:37:27 +0200 Subject: [PATCH 17/31] Improve E024 text for incorrect GoldParse (closes #3558) --- spacy/errors.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index f1d42adae..3a1e05e05 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -141,8 +141,14 @@ class Errors(object): E023 = ("Error cleaning up beam: The same state occurred twice at " "memory address {addr} and position {i}.") E024 = ("Could not find an optimal move to supervise the parser. Usually, " - "this means the GoldParse was not correct. For example, are all " - "labels added to the model?") + "this means that the model can't be updated in a way that's valid " + "and satisfies the correct annotations specified in the GoldParse. 
" + "For example, are all labels added to the model? If you're " + "training a named entity recognizer, also make sure that none of " + "your annotated entity spans have leading or trailing whitespace. " + "You can also use the experimental `debug-data` command to " + "validate your JSON-formatted training data. For details, run:\n" + "python -m spacy debug-data --help") E025 = ("String is too long: {length} characters. Max is 2**30.") E026 = ("Error accessing token at position {i}: out of bounds in Doc of " "length {length}.") From 6be7d07315d95ef8c4fda219f6e3bcbb22f1a389 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sat, 1 Jun 2019 16:37:06 +0200 Subject: [PATCH 18/31] Update UNIVERSE.md --- website/UNIVERSE.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/website/UNIVERSE.md b/website/UNIVERSE.md index c26c0fce4..2a83b2983 100644 --- a/website/UNIVERSE.md +++ b/website/UNIVERSE.md @@ -34,11 +34,10 @@ on the issue tracker. ## JSON format -To add a project, fork this repository, edit the [`universe.json`](universe.json) +To add a project, fork this repository, edit the [`universe.json`](meta/universe.json) and add an object of the following format to the list of `"resources"`. Before you submit your pull request, make sure to use a linter to verify that your -markup is correct. We'll also be adding linting for the `universe.json` to our -automated GitHub checks soon. +markup is correct. ```json { @@ -92,4 +91,4 @@ automated GitHub checks soon. To separate them from the projects, educational materials also specify `"type": "education`. Books can also set a `"cover"` field containing a URL to a cover image. If available, it's used in the overview and displayed on -the individual book page. \ No newline at end of file +the individual book page. 
From a5d92a30357a729c442d9be432fae48b06601a7e Mon Sep 17 00:00:00 2001 From: Nirant Date: Sat, 1 Jun 2019 21:06:06 +0530 Subject: [PATCH 19/31] Create NirantK.md (#3807) [ci skip] --- .github/contributors/NirantK.md | 106 ++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 .github/contributors/NirantK.md diff --git a/.github/contributors/NirantK.md b/.github/contributors/NirantK.md new file mode 100644 index 000000000..50f13dce4 --- /dev/null +++ b/.github/contributors/NirantK.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. 
With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. 
Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Nirant Kasliwal | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | | +| GitHub username | NirantK | +| Website (optional) | https://nirantk.com | From d4d1eab5e1acfde665b76c48b96f5251cd5e71fc Mon Sep 17 00:00:00 2001 From: Nirant Date: Sat, 1 Jun 2019 21:06:40 +0530 Subject: [PATCH 20/31] Add Baderlab/saber to universe.json (#3806) --- website/meta/universe.json | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index 151b41452..39602e0bb 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1,5 +1,22 @@ { "resources": [ + { + "id": "saber", + "title": "saber", + "slogan": "deep-learning based tool for information extraction in the biomedical domain", + "github": "BaderLab/saber", + "pip": "saber", + "thumb": "https://raw.githubusercontent.com/BaderLab/saber/master/docs/img/saber_logo.png", + "code_example": [ + ">>> from saber.saber import Saber", + ">>> saber = Saber()", + ">>> saber.load('PRGE')", + "saber.annotate('The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53.')" + ], + "category": ["research", "biomedical"], + "tags": ["keras"] + }, + { "id": "spacymoji", "slogan": "Emoji handling and meta data as a spaCy pipeline component", From 86eb817b74f13d7561b797120e6984c021ddc411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n?= <46906587+munozbravo@users.noreply.github.com> Date: Sun, 2 Jun 2019 05:22:57 -0500 Subject: [PATCH 21/31] Overwrites default getter for like_num in Spanish by adding _num_words and like_num to lex_attrs.py (#3810) (closes #3803)) * (#3803) Spanish like_num returning false for number-like token * (#3803) Spanish like_num now returning True for number-like token --- .github/contributors/munozbravo.md | 106 +++++++++++++++++++++++ spacy/lang/es/__init__.py | 2 + 
spacy/lang/es/lex_attrs.py | 59 +++++++++++++ spacy/tests/regression/test_issue3803.py | 15 ++++ 4 files changed, 182 insertions(+) create mode 100644 .github/contributors/munozbravo.md create mode 100644 spacy/lang/es/lex_attrs.py create mode 100644 spacy/tests/regression/test_issue3803.py diff --git a/.github/contributors/munozbravo.md b/.github/contributors/munozbravo.md new file mode 100644 index 000000000..b36133e1e --- /dev/null +++ b/.github/contributors/munozbravo.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. 
With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. 
Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Germán Muñoz | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 2019-06-01 | +| GitHub username | munozbravo | +| Website (optional) | | diff --git a/spacy/lang/es/__init__.py b/spacy/lang/es/__init__.py index d5d6e4f23..d49948b30 100644 --- a/spacy/lang/es/__init__.py +++ b/spacy/lang/es/__init__.py @@ -4,6 +4,7 @@ from __future__ import unicode_literals from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .tag_map import TAG_MAP from .stop_words import STOP_WORDS +from .lex_attrs import LEX_ATTRS from .lemmatizer import LOOKUP from .syntax_iterators import SYNTAX_ITERATORS @@ -16,6 +17,7 @@ from ...util import update_exc, add_lookups class SpanishDefaults(Language.Defaults): lex_attr_getters = dict(Language.Defaults.lex_attr_getters) + lex_attr_getters.update(LEX_ATTRS) lex_attr_getters[LANG] = lambda text: "es" lex_attr_getters[NORM] = add_lookups( Language.Defaults.lex_attr_getters[NORM], BASE_NORMS diff --git a/spacy/lang/es/lex_attrs.py b/spacy/lang/es/lex_attrs.py new file mode 100644 index 000000000..03ada1f43 --- /dev/null +++ b/spacy/lang/es/lex_attrs.py @@ -0,0 +1,59 @@ +# coding: utf8 +from __future__ import unicode_literals + +from ...attrs import LIKE_NUM + + +_num_words = [ + "cero", + "uno", + "dos", + "tres", + "cuatro", + "cinco", + "seis", + "siete", + "ocho", + "nueve", + "diez", + "once", + "doce", + "trece", + "catorce", + "quince", + "dieciséis", + "diecisiete", + "dieciocho", + "diecinueve", + "veinte", + "treinta", + "cuarenta", + "cincuenta", + "sesenta", + "setenta", + "ochenta", + "noventa", + "cien", + "mil", + "millón", + "billón", + "trillón", +] + + +def like_num(text): + if text.startswith(("+", "-", "±", "~")): + text = text[1:] + text = text.replace(",", "").replace(".", "") + if text.isdigit(): + return True + if text.count("/") == 1: + num, denom = text.split("/") + if 
num.isdigit() and denom.isdigit(): + return True + if text.lower() in _num_words: + return True + return False + + +LEX_ATTRS = {LIKE_NUM: like_num} diff --git a/spacy/tests/regression/test_issue3803.py b/spacy/tests/regression/test_issue3803.py new file mode 100644 index 000000000..4d9b664fa --- /dev/null +++ b/spacy/tests/regression/test_issue3803.py @@ -0,0 +1,15 @@ +# coding: utf8 +from __future__ import unicode_literals + +import pytest + +from spacy.lang.es import Spanish + + +def test_issue3803(): + """Test that spanish num-like tokens have True for like_num attribute.""" + nlp = Spanish() + text = "2 dos 1000 mil 12 doce" + doc = nlp(text) + + assert [t.like_num for t in doc] == [True, True, True, True, True, True] From 638caba9b5289cc2cf063a0f56f1862b3bcb057a Mon Sep 17 00:00:00 2001 From: Nirant Date: Sun, 2 Jun 2019 16:05:52 +0530 Subject: [PATCH 22/31] Add multiple packages to universe.json (#3809) [ci skip] * Add multiple packages to universe.json Added following packages: NLPArchitect, NLPRe, Chatterbot, alibi, NeuroNER * Auto-format * Update slogan (probably just copy-paste mistake) * Adjust formatting * Update tags / categories --- website/meta/universe.json | 98 +++++++++++++++++++++++++++++++++++--- 1 file changed, 91 insertions(+), 7 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 39602e0bb..12196cd91 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1,22 +1,106 @@ { "resources": [ + { + "id": "nlp-architect", + "title": "NLP Architect", + "slogan": "Python lib for exploring Deep NLP & NLU by Intel AI", + "github": "NervanaSystems/nlp-architect", + "pip": "nlp-architect", + "thumb": "https://raw.githubusercontent.com/NervanaSystems/nlp-architect/master/assets/nlp_architect_logo.png", + "code_example": [], + "category": ["standalone", "research"], + "tags": ["pytorch"] + }, + { + "id": "NeuroNER", + "title": "NeuroNER", + "slogan": "Named-entity recognition using neural networks", 
+      "github": "Franck-Dernoncourt/NeuroNER",
+      "pip": "pyneuroner[cpu]",
+      "thumb": "",
+      "code_example": [
+        "from neuroner import neuromodel",
+        "nn = neuromodel.NeuroNER(train_model=False, use_pretrained_model=True)"
+      ],
+      "category": ["ner"],
+      "tags": ["standalone"]
+    },
+    {
+      "id": "NLPre",
+      "title": "NLPre",
+      "slogan": "Natural Language Preprocessing Library in Health data",
+      "github": "NIHOPA/NLPre",
+      "pip": "nlpre",
+      "thumb": "",
+      "code_example": [
+        "from nlpre import titlecaps, dedash, identify_parenthetical_phrases",
+        "from nlpre import replace_acronyms, replace_from_dictionary",
+        "ABBR = identify_parenthetical_phrases()(text)",
+        "parsers = [dedash(), titlecaps(), replace_acronyms(ABBR),",
+        "    replace_from_dictionary(prefix='MeSH_')]",
+        "for f in parsers:",
+        "    text = f(text)",
+        "print(text)"
+      ],
+      "category": ["standalone"],
+      "tags": []
+    },
+    {
+      "id": "Chatterbot",
+      "title": "Chatterbot",
+      "slogan": "A machine-learning based conversational dialog engine for creating chat bots",
+      "github": "gunthercox/ChatterBot",
+      "pip": "chatterbot",
+      "thumb": "https://chatterbot.readthedocs.io/en/stable/_images/banner.png",
+      "code_example": [
+        "from chatterbot import ChatBot",
+        "from chatterbot.trainers import ListTrainer",
+        "# Create a new chat bot named Charlie",
+        "chatbot = ChatBot('Charlie')",
+        "trainer = ListTrainer(chatbot)",
+        "trainer.train([",
+        "'Hi, can I help you?',",
+        "'Sure, I would like to book a flight to Iceland.',",
+        "'Your flight has been booked.'",
+        "])",
+        "",
+        "response = chatbot.get_response('I would like to book a flight.')"
+      ],
+      "category": ["conversational", "standalone"],
+      "tags": ["chatbots"]
+    },
     {
       "id": "saber",
       "title": "saber",
-      "slogan": "deep-learning based tool for information extraction in the biomedical domain",
+      "slogan": "Deep-learning based tool for information extraction in the biomedical domain",
       "github": "BaderLab/saber",
       "pip": "saber",
       "thumb":
"https://raw.githubusercontent.com/BaderLab/saber/master/docs/img/saber_logo.png", "code_example": [ - ">>> from saber.saber import Saber", - ">>> saber = Saber()", - ">>> saber.load('PRGE')", + "from saber.saber import Saber", + "saber = Saber()", + "saber.load('PRGE')", "saber.annotate('The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53.')" ], - "category": ["research", "biomedical"], - "tags": ["keras"] + "category": ["research"], + "tags": ["keras", "biomedical"] + }, + { + "id": "alibi", + "title": "alibi", + "slogan": "Algorithms for monitoring and explaining machine learning models ", + "github": "SeldonIO/alibi", + "pip": "alibi", + "thumb": "https://docs.seldon.io/projects/alibi/en/v0.2.0/_static/Alibi_Logo.png", + "code_example": [ + ">>> from alibi.explainers import AnchorTabular", + ">>> explainer = AnchorTabular(predict_fn, feature_names)", + ">>> explainer.fit(X_train)", + ">>> explainer.explain(x)" + ], + "category": ["standalone", "research"], + "tags": [] }, - { "id": "spacymoji", "slogan": "Emoji handling and meta data as a spaCy pipeline component", From 42de5be90c777f4cf29a160fc19f6e94199d34ac Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sun, 2 Jun 2019 12:38:48 +0200 Subject: [PATCH 23/31] Tidy up universe [ci skip] --- website/meta/universe.json | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 12196cd91..e66bf450c 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -6,8 +6,7 @@ "slogan": "Python lib for exploring Deep NLP & NLU by Intel AI", "github": "NervanaSystems/nlp-architect", "pip": "nlp-architect", - "thumb": "https://raw.githubusercontent.com/NervanaSystems/nlp-architect/master/assets/nlp_architect_logo.png", - "code_example": [], + "thumb": "https://i.imgur.com/vMideRx.png", "category": ["standalone", "research"], "tags": ["pytorch"] }, @@ -17,7 +16,6 @@ "slogan": "Named-entity 
recognition using neural networks", "github": "Franck-Dernoncourt/NeuroNER", "pip": "pyneuroner[cpu]", - "thumb": "", "code_example": [ "from neuroner import neuromodel", "nn = neuromodel.NeuroNER(train_model=False, use_pretrained_model=True)" @@ -31,7 +29,6 @@ "slogan": "Natural Language Preprocessing Library in Health data", "github": "NIHOPA/NLPre", "pip": "nlpre", - "thumb": "", "code_example": [ "from nlpre import titlecaps, dedash, identify_parenthetical_phrases", "from nlpre import replace_acronyms, replace_from_dictionary", @@ -42,8 +39,7 @@ " text = f(text)", "print(text)" ], - "category": ["standalone"], - "tags": [] + "category": ["standalone"] }, { "id": "Chatterbot", @@ -51,7 +47,7 @@ "slogan": "A machine-learning based conversational dialog engine for creating chat bots", "github": "gunthercox/ChatterBot", "pip": "chatterbot", - "thumb": "https://chatterbot.readthedocs.io/en/stable/_images/banner.png", + "thumb": "https://i.imgur.com/eyAhwXk.jpg", "code_example": [ "from chatterbot import ChatBot", "from chatterbot.trainers import ListTrainer", @@ -91,15 +87,14 @@ "slogan": "Algorithms for monitoring and explaining machine learning models ", "github": "SeldonIO/alibi", "pip": "alibi", - "thumb": "https://docs.seldon.io/projects/alibi/en/v0.2.0/_static/Alibi_Logo.png", + "thumb": "https://i.imgur.com/YkzQHRp.png", "code_example": [ - ">>> from alibi.explainers import AnchorTabular", - ">>> explainer = AnchorTabular(predict_fn, feature_names)", - ">>> explainer.fit(X_train)", - ">>> explainer.explain(x)" + "from alibi.explainers import AnchorTabular", + "explainer = AnchorTabular(predict_fn, feature_names)", + "explainer.fit(X_train)", + "explainer.explain(x)" ], - "category": ["standalone", "research"], - "tags": [] + "category": ["standalone", "research"] }, { "id": "spacymoji", From 892e72451fb604ab5916b94fc19fdc550bb568b2 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sun, 2 Jun 2019 12:58:12 +0200 Subject: [PATCH 24/31] Update universe [ci skip] 
--- website/meta/universe.json | 53 +++++++++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 10 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index e66bf450c..5155493bd 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -26,7 +26,7 @@ { "id": "NLPre", "title": "NLPre", - "slogan": "Natural Language Preprocessing Library in Health data", + "slogan": "Natural Language Preprocessing Library for health data and more", "github": "NIHOPA/NLPre", "pip": "nlpre", "code_example": [ @@ -39,7 +39,7 @@ " text = f(text)", "print(text)" ], - "category": ["standalone"] + "category": ["scientific"] }, { "id": "Chatterbot", @@ -62,6 +62,10 @@ "", "response = chatbot.get_response('I would like to book a flight.')" ], + "author": "Gunther Cox", + "author_links": { + "github": "gunthercox" + }, "category": ["conversational", "standalone"], "tags": ["chatbots"] }, @@ -78,7 +82,8 @@ "saber.load('PRGE')", "saber.annotate('The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53.')" ], - "category": ["research"], + "author": "Bader Lab, University of Toronto", + "category": ["scientific"], "tags": ["keras", "biomedical"] }, { @@ -94,6 +99,7 @@ "explainer.fit(X_train)", "explainer.explain(x)" ], + "author": "Seldon", "category": ["standalone", "research"] }, { @@ -239,7 +245,7 @@ "doc = nlp(my_doc_text)" ], "author": "tc64", - "author_link": { + "author_links": { "github": "tc64" }, "category": ["pipeline"] @@ -442,7 +448,7 @@ "author_links": { "github": "huggingface" }, - "category": ["standalone", "conversational"], + "category": ["standalone", "conversational", "models"], "tags": ["coref"] }, { @@ -634,7 +640,7 @@ "twitter": "allenai_org", "website": "http://allenai.org" }, - "category": ["models", "research"] + "category": ["scientific", "models", "research"] }, { "id": "textacy", @@ -697,7 +703,7 @@ "github": "ahalterman", "twitter": "ahalterman" }, - "category": ["standalone"] + "category": 
["standalone", "scientific"] }, { "id": "kindred", @@ -722,7 +728,7 @@ "author_links": { "github": "jakelever" }, - "category": ["standalone"] + "category": ["standalone", "scientific"] }, { "id": "sense2vec", @@ -990,6 +996,23 @@ "author": "Aaron Kramer", "category": ["courses"] }, + { + "type": "education", + "id": "spacy-course", + "title": "Advanced NLP with spaCy", + "slogan": "spaCy, 2019", + "description": "In this free interactive course, you'll learn how to use spaCy to build advanced natural language understanding systems, using both rule-based and machine learning approaches.", + "url": "https://course.spacy.io", + "image": "https://i.imgur.com/JC00pHW.jpg", + "thumb": "https://i.imgur.com/5RXLtrr.jpg", + "author": "Ines Montani", + "author_links": { + "twitter": "_inesmontani", + "github": "ines", + "website": "https://ines.io" + }, + "category": ["courses"] + }, { "type": "education", "id": "video-spacys-ner-model", @@ -1150,7 +1173,7 @@ "github": "ecohealthalliance", "website": " https://ecohealthalliance.org/" }, - "category": ["research", "standalone"] + "category": ["scientific", "standalone"] }, { "id": "self-attentive-parser", @@ -1472,7 +1495,7 @@ "url": "https://github.com/msg-systems/holmes-extractor", "description": "Holmes is a Python 3 library that supports a number of use cases involving information extraction from English and German texts, including chatbot, structural search, topic matching and supervised document classification.", "pip": "holmes-extractor", - "category": ["conversational", "research", "standalone"], + "category": ["conversational", "standalone"], "tags": ["chatbots", "text-processing"], "code_example": [ "import holmes_extractor as holmes", @@ -1511,6 +1534,11 @@ "title": "Research", "description": "Frameworks and utilities for developing better NLP models, especially using neural networks" }, + { + "id": "scientific", + "title": "Scientific", + "description": "Frameworks and utilities for scientific text processing" + 
}, { "id": "visualizers", "title": "Visualizers", @@ -1530,6 +1558,11 @@ "id": "standalone", "title": "Standalone", "description": "Self-contained libraries or tools that use spaCy under the hood" + }, + { + "id": "models", + "title": "Models", + "description": "Third-party pre-trained models for different languages and domains" } ] }, From e7033011299d0444bb08f115778b03aedfcb4a38 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sun, 2 Jun 2019 13:55:55 +0200 Subject: [PATCH 25/31] Update universe [ci skip] --- website/meta/universe.json | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index 5155493bd..10e9be349 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -939,6 +939,42 @@ }, "category": ["standalone"] }, + { + "id": "prefect", + "title": "Prefect", + "slogan": "Workflow management system designed for modern infrastructure", + "github": "PrefectHQ/prefect", + "pip": "prefect", + "thumb": "https://i.imgur.com/oLTwr0e.png", + "code_example": [ + "from prefect import Flow", + "from prefect.tasks.spacy.spacy_tasks import SpacyNLP", + "import spacy", + "", + "nlp = spacy.load(\"en_core_web_sm\")", + "", + "with Flow(\"Natural Language Processing\") as flow:", + " doc = SpacyNLP(text=\"This is some text\", nlp=nlp)", + "", + "flow.run()" + ], + "author": "Prefect", + "author_links": { + "website": "https://prefect.io" + }, + "category": ["standalone"] + }, + { + "id": "graphbrain", + "title": "Graphbrain", + "slogan": "Automated meaning extraction and text understanding", + "description": "Graphbrain is an Artificial Intelligence open-source software library and scientific research tool. 
Its aim is to facilitate automated meaning extraction and text understanding, as well as the exploration and inference of knowledge.", + "github": "graphbrain/graphbrain", + "pip": "graphbrain", + "thumb": "https://i.imgur.com/cct9W1E.png", + "author": "Graphbrain", + "category": ["standalone"] + }, { "type": "education", "id": "oreilly-python-ds", From 62ebc65c62d9945b637b68315beac0e26270b3a4 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Mon, 3 Jun 2019 12:19:13 +0200 Subject: [PATCH 26/31] Update universe [ci skip] --- website/meta/universe.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index 10e9be349..b3f0ccc5f 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1151,6 +1151,22 @@ }, "category": ["podcasts"] }, + { + "type": "education", + "id": "analytics-vidhya", + "title": "DataHack Radio #23: The Brains behind spaCy", + "slogan": "June 2019", + "description": "\"What would you do if you had the chance to pick the brains behind one of the most popular Natural Language Processing (NLP) libraries of our era? A library that has helped usher in the current boom in NLP applications and nurtured tons of NLP scientists? Well – you invite the creators on our popular DataHack Radio podcast and let them do the talking! 
We are delighted to welcome Ines Montani and Matt Honnibal, the developers of spaCy – a powerful and advanced library for NLP.\"", + "thumb": "https://i.imgur.com/3zJKZ1P.jpg", + "url": "https://www.analyticsvidhya.com/blog/2019/06/datahack-radio-ines-montani-matthew-honnibal-brains-behind-spacy/", + "soundcloud": "630741825", + "author": "Analytics Vidhya", + "author_links": { + "website": "https://www.analyticsvidhya.com", + "twitter": "analyticsvidhya" + }, + "category": ["podcasts"] + }, { "id": "adam_qas", "title": "ADAM: Question Answering System", From 2bba2a35362cbe00449f6f452dd6166620cb92d6 Mon Sep 17 00:00:00 2001 From: intrafind <2115805+intrafindBreno@users.noreply.github.com> Date: Mon, 3 Jun 2019 18:32:47 +0200 Subject: [PATCH 27/31] Fix for #3811 (#3815) Corrected type of seed parameter. --- spacy/cli/pretrain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/cli/pretrain.py b/spacy/cli/pretrain.py index b2c22d929..3b2981f0d 100644 --- a/spacy/cli/pretrain.py +++ b/spacy/cli/pretrain.py @@ -33,7 +33,7 @@ from .. 
import util batch_size=("Number of words per training batch", "option", "bs", int), max_length=("Max words per example.", "option", "xw", int), min_length=("Min words per example.", "option", "nw", int), - seed=("Seed for random number generators", "option", "s", float), + seed=("Seed for random number generators", "option", "s", int), n_iter=("Number of iterations to pretrain", "option", "i", int), n_save_every=("Save model every X batches.", "option", "se", int), ) From 436a578369217cce1960c6dd5ddeee2c0e4c5600 Mon Sep 17 00:00:00 2001 From: intrafind <2115805+intrafindBreno@users.noreply.github.com> Date: Mon, 3 Jun 2019 18:33:09 +0200 Subject: [PATCH 28/31] Create intrafindBreno.md (#3814) --- .github/contributors/intrafindBreno.md | 106 +++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 .github/contributors/intrafindBreno.md diff --git a/.github/contributors/intrafindBreno.md b/.github/contributors/intrafindBreno.md new file mode 100644 index 000000000..204d20c07 --- /dev/null +++ b/.github/contributors/intrafindBreno.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. 
For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. 
With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. 
Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | ------------------------ | +| Name | Breno Faria | +| Company name (if applicable) | IntraFind | +| Title or role (if applicable) | Product Lead | +| Date | 03.06.2019 | +| GitHub username | intrafindBreno | +| Website (optional) | | From eb12703d1044c52ea50df7fb6944bedf640af3da Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Tue, 4 Jun 2019 14:45:35 +0530 Subject: [PATCH 29/31] minor fix to broken link in documentation (#3819) [ci skip] --- website/docs/usage/processing-pipelines.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md index 871ca3db6..0fa243501 100644 --- a/website/docs/usage/processing-pipelines.md +++ b/website/docs/usage/processing-pipelines.md @@ -41,7 +41,7 @@ components. spaCy then does the following: `Language` class contains the shared vocabulary, tokenization rules and the language-specific annotation scheme. 2. Iterate over the **pipeline names** and create each component using - [`create_pipe`](/api/anguage#create_pipe), which looks them up in + [`create_pipe`](/api/language#create_pipe), which looks them up in `Language.factories`. 3. Add each pipeline component to the pipeline in order, using [`add_pipe`](/api/language#add_pipe). 
From 511977ae5e6e1eca3be7c1cbe02e9294ca5114dc Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Tue, 4 Jun 2019 11:15:51 +0200 Subject: [PATCH 30/31] Update universe [ci skip] --- website/meta/universe.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index b3f0ccc5f..851b6107f 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1092,7 +1092,7 @@ { "type": "education", "id": "podcast-nlp-highlights", - "title": "NLP Highlights 78: Where do corpora come from?", + "title": "NLP Highlights #78: Where do corpora come from?", "slogan": "January 2019", "description": "Most NLP projects rely crucially on the quality of annotations used for training and evaluating models. In this episode, Matt and Ines of Explosion AI tell us how Prodigy can improve data annotation and model development workflows. Prodigy is an annotation tool implemented as a python library, and it comes with a web application and a command line interface. A developer can define input data streams and design simple annotation interfaces. Prodigy can help break down complex annotation decisions into a series of binary decisions, and it provides easy integration with spaCy models. Developers can specify how models should be modified as new annotations come in in an active learning framework.", "soundcloud": "559200912", @@ -1107,7 +1107,7 @@ { "type": "education", "id": "podcast-init", - "title": "Podcast.__init__ 87: spaCy with Matthew Honnibal", + "title": "Podcast.__init__ #87: spaCy with Matthew Honnibal", "slogan": "December 2017", "description": "As the amount of text available on the internet and in businesses continues to increase, the need for fast and accurate language analysis becomes more prominent. 
This week Matthew Honnibal, the creator of SpaCy, talks about his experiences researching natural language processing and creating a library to make his findings accessible to industry.", "iframe": "https://www.pythonpodcast.com/wp-content/plugins/podlove-podcasting-plugin-for-wordpress/lib/modules/podlove_web_player/player_v4/dist/share.html?episode=https://www.pythonpodcast.com/?podlove_player4=176", @@ -1123,7 +1123,7 @@ { "type": "education", "id": "talk-python-podcast", - "title": "Talk Python 202: Building a software business", + "title": "Talk Python #202: Building a software business", "slogan": "March 2019", "description": "One core question around open source is how do you fund it? Well, there is always that PayPal donate button. But that's been a tremendous failure for many projects. Often the go-to answer is consulting. But what if you don't want to trade time for money? You could take things up a notch and change the equation, exchanging value for money. That's what Ines Montani and her co-founder did when they started Explosion AI with spaCy as the foundation.", "thumb": "https://i.imgur.com/q1twuK8.png", From 5d6b4bb3bdd856d2a1d7926f4b5cbdde3849a921 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 7 Jun 2019 11:14:32 +0200 Subject: [PATCH 31/31] Update srsly pin --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 42045a829..8cc52dfe4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ thinc>=7.0.2,<7.1.0 blis>=0.2.2,<0.3.0 murmurhash>=0.28.0,<1.1.0 wasabi>=0.2.0,<1.1.0 -srsly>=0.0.5,<1.1.0 +srsly>=0.0.6,<1.1.0 # Third party dependencies numpy>=1.15.0 requests>=2.13.0,<3.0.0 diff --git a/setup.py b/setup.py index 0c2b541bd..33623588c 100755 --- a/setup.py +++ b/setup.py @@ -233,7 +233,7 @@ def setup_package(): "plac<1.0.0,>=0.9.6", "requests>=2.13.0,<3.0.0", "wasabi>=0.2.0,<1.1.0", - "srsly>=0.0.5,<1.1.0", + "srsly>=0.0.6,<1.1.0", 
'pathlib==1.0.1; python_version < "3.4"', ], setup_requires=["wheel"],