import os
import importlib
import importlib.util
import re
from pathlib import Path
import random
from typing import List
import thinc
from thinc.api import NumpyOps, get_current_ops, Adam, require_gpu, Config
import functools
import itertools
import numpy.random
import srsly
import catalogue
import sys
import warnings

try:
    import cupy.random
except ImportError:
    cupy = None

try:  # Python 3.8
    import importlib.metadata as importlib_metadata
except ImportError:
    import importlib_metadata
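# Note for illustration (not part of the module): the fallback above means the
# same name works on Python 3.7 (backport package) and 3.8+ (stdlib), e.g. a
# hypothetical version lookup:
#
#     spacy_version = importlib_metadata.version("spacy")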
from .symbols import ORTH
from .compat import cupy, CudaStream
from .errors import Errors, Warnings
from . import about


_PRINT_ENV = False


class registry(thinc.registry):
    languages = catalogue.create("spacy", "languages", entry_points=True)
    architectures = catalogue.create("spacy", "architectures", entry_points=True)
    lookups = catalogue.create("spacy", "lookups", entry_points=True)
    factories = catalogue.create("spacy", "factories", entry_points=True)
    displacy_colors = catalogue.create("spacy", "displacy_colors", entry_points=True)
    assets = catalogue.create("spacy", "assets", entry_points=True)
    # This is mostly used to get a list of all installed models in the current
    # environment. spaCy models packaged with `spacy package` will "advertise"
    # themselves via entry points.
    models = catalogue.create("spacy", "models", entry_points=True)
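# Usage sketch (illustrative, not part of the module): each registry above is a
# catalogue.Registry, so registration and lookup share one API. A hypothetical
# plugin could add an architecture via the decorator form:
#
#     @registry.architectures.register("my_custom_arch.v1")
#     def my_custom_arch(width: int):
#         ...
#
# and model packages advertised via entry points can be listed with
# registry.models.get_all().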


def set_env_log(value):
    global _PRINT_ENV
    _PRINT_ENV = value


def lang_class_is_loaded(lang):
    """Check whether a Language class is already loaded. Language classes are
    loaded lazily, to avoid expensive setup code associated with the language
    data.

    lang (unicode): Two-letter language code, e.g. 'en'.
    RETURNS (bool): Whether a Language class has been loaded.
    """
    return lang in registry.languages
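# For illustration only: this is a plain membership check against
# registry.languages, e.g.
#
#     lang_class_is_loaded("en")  # False until the English class is registered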


def get_lang_class(lang):
    """Import and load a Language class.

    lang (unicode): Two-letter language code, e.g. 'en'.
    RETURNS (Language): Language class.
    """
    # Check if language is registered / entry point is available
    if lang in registry.languages:
        return registry.languages.get(lang)
    else:
        try:
            module = importlib.import_module(f".lang.{lang}", "spacy")
        except ImportError as err:
            raise ImportError(Errors.E048.format(lang=lang, err=err))
        set_lang_class(lang, getattr(module, module.__all__[0]))
    return registry.languages.get(lang)
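# Usage sketch (illustrative, not part of the module): the class is imported
# lazily on first use and then served from the registry.
#
#     EnglishCls = get_lang_class("en")   # imports spacy.lang.en if needed
#     nlp = EnglishCls()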


def set_lang_class(name, cls):
    """Set a custom Language class name that can be loaded via get_lang_class.

    name (unicode): Name of Language class.
    cls (Language): Language class.
    """
    registry.languages.register(name, func=cls)
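# Usage sketch (illustrative, assuming a custom Language subclass defined
# elsewhere):
#
#     set_lang_class("xx_custom", CustomLanguage)
#     nlp = get_lang_class("xx_custom")()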


def ensure_path(path):
    """Ensure string is converted to a Path.

    path: Anything. If string, it's converted to Path.
    RETURNS: Path or original argument.
    """
    if isinstance(path, str):
        return Path(path)
    else:
        return path
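# For illustration only: strings become pathlib.Path objects, everything else
# is returned unchanged.
#
#     ensure_path("training/config.cfg")  # -> Path("training/config.cfg")
#     ensure_path(None)                   # -> None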


def load_language_data(path):
    """Load JSON language data using the given path as a base. If the provided
    path isn't present, will attempt to load a gzipped version before giving up.

    path (unicode / Path): The data to load.
    RETURNS: The loaded data.
|
Reduce size of language data (#4141)
* Move Turkish lemmas to a json file
Rather than a large dict in Python source, the data is now a big json
file. This includes a method for loading the json file, falling back to
a compressed file, and an update to MANIFEST.in that excludes json in
the spacy/lang directory.
This focuses on Turkish specifically because it has the most language
data in core.
* Transition all lemmatizer.py files to json
This covers all lemmatizer.py files of a significant size (>500k or so).
Small files were left alone.
None of the affected files have logic, so this was pretty
straightforward.
One unusual thing is that the lemma data for Urdu doesn't seem to be
used anywhere. That may require further investigation.
* Move large lang data to json for fr/nb/nl/sv
These are the languages that use a lemmatizer directory (rather than a
single file) and are larger than English.
For most of these languages there were many language data files, in
which case only the large ones (>500k or so) were converted to json. It
may or may not be a good idea to migrate the remaining Python files to
json in the future.
* Fix id lemmas.json
The contents of this file were originally just copied from the Python
source, but that used single quotes, so it had to be properly converted
to json first.
* Add .json.gz to gitignore
This covers the json.gz files built as part of distribution.
* Add language data gzip to build process
Currently this gzip data on every build; it works, but it should be
changed to only gzip when the source file has been updated.
* Remove Danish lemmatizer.py
Missed this when I added the json.
* Update to match latest explosion/srsly#9
The way gzipped json is loaded/saved in srsly changed a bit.
* Only compress language data if necessary
If a .json.gz file exists and is newer than the corresponding json file,
it's not recompressed.
* Move en/el language data to json
This only affected files >500kb, which was nouns for both languages and
the generic lookup table for English.
* Remove empty files in Norwegian tokenizer
It's unclear why, but the Norwegian (nb) tokenizer had empty files for
adj/adv/noun/verb lemmas. This may have been a result of copying the
structure of the English lemmatizer.
This removed the files, but still creates the empty sets in the
lemmatizer. That may not actually be necessary.
* Remove dubious entries in English lookup.json
" furthest" and " skilled" - both prefixed with a space - were in the
English lookup table. That seems obviously wrong so I have removed them.
* Fix small issues with en/fr lemmatizers
The en tokenizer was including the removed _nouns.py file, so that's
removed.
The fr tokenizer is unusual in that it has a lemmatizer directory with
both __init__.py and lemmatizer.py. lemmatizer.py had not been converted
to load the json language data, so that was fixed.
* Auto-format
* Auto-format
* Update srsly pin
* Consistently use pathlib paths
2019-08-20 12:54:11 +00:00
|
|
|
|
"""
|
2019-08-22 12:21:32 +00:00
|
|
|
|
path = ensure_path(path)
|
|
|
|
|
if path.exists():
|
Reduce size of language data (#4141)
* Move Turkish lemmas to a json file
Rather than a large dict in Python source, the data is now a big json
file. This includes a method for loading the json file, falling back to
a compressed file, and an update to MANIFEST.in that excludes json in
the spacy/lang directory.
This focuses on Turkish specifically because it has the most language
data in core.
* Transition all lemmatizer.py files to json
This covers all lemmatizer.py files of a significant size (>500k or so).
Small files were left alone.
None of the affected files have logic, so this was pretty
straightforward.
One unusual thing is that the lemma data for Urdu doesn't seem to be
used anywhere. That may require further investigation.
* Move large lang data to json for fr/nb/nl/sv
These are the languages that use a lemmatizer directory (rather than a
single file) and are larger than English.
For most of these languages there were many language data files, in
which case only the large ones (>500k or so) were converted to json. It
may or may not be a good idea to migrate the remaining Python files to
json in the future.
* Fix id lemmas.json
The contents of this file were originally just copied from the Python
source, but that used single quotes, so it had to be properly converted
to json first.
* Add .json.gz to gitignore
This covers the json.gz files built as part of distribution.
* Add language data gzip to build process
Currently this gzip data on every build; it works, but it should be
changed to only gzip when the source file has been updated.
* Remove Danish lemmatizer.py
Missed this when I added the json.
* Update to match latest explosion/srsly#9
The way gzipped json is loaded/saved in srsly changed a bit.
* Only compress language data if necessary
If a .json.gz file exists and is newer than the corresponding json file,
it's not recompressed.
* Move en/el language data to json
This only affected files >500kb, which was nouns for both languages and
the generic lookup table for English.
* Remove empty files in Norwegian tokenizer
It's unclear why, but the Norwegian (nb) tokenizer had empty files for
adj/adv/noun/verb lemmas. This may have been a result of copying the
structure of the English lemmatizer.
This removed the files, but still creates the empty sets in the
lemmatizer. That may not actually be necessary.
* Remove dubious entries in English lookup.json
" furthest" and " skilled" - both prefixed with a space - were in the
English lookup table. That seems obviously wrong so I have removed them.
* Fix small issues with en/fr lemmatizers
The en tokenizer was including the removed _nouns.py file, so that's
removed.
The fr tokenizer is unusual in that it has a lemmatizer directory with
both __init__.py and lemmatizer.py. lemmatizer.py had not been converted
to load the json language data, so that was fixed.
* Auto-format
* Auto-format
* Update srsly pin
* Consistently use pathlib paths
2019-08-20 12:54:11 +00:00
|
|
|
|
return srsly.read_json(path)
|
2019-08-22 12:21:32 +00:00
|
|
|
|
path = path.with_suffix(path.suffix + ".gz")
|
|
|
|
|
if path.exists():
|
|
|
|
|
return srsly.read_gzip_json(path)
|
2019-12-22 00:53:56 +00:00
|
|
|
|
raise ValueError(Errors.E160.format(path=path))
|
2019-08-22 12:21:32 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
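# Usage sketch (not from the original source; the filename and layout are
# assumptions): given a base path such as a lookup table shipped next to a
# language module, either the plain JSON file or its ".json.gz" sibling is
# read, whichever exists.
#
#     lookup = load_language_data(Path(__file__).parent / "lemma_lookup.json")

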
def get_module_path(module):
    if not hasattr(module, "__module__"):
        raise ValueError(Errors.E169.format(module=repr(module)))
    return Path(sys.modules[module.__module__].__file__).parent


def load_model(name, **overrides):
    """Load a model from a package or data path.

    name (unicode): Package name or model path.
    **overrides: Specific overrides, like pipeline components to disable.
    RETURNS (Language): `Language` class with the loaded model.
    """
    if isinstance(name, str):  # name or string path
        if is_package(name):  # installed as package
            return load_model_from_package(name, **overrides)
        if Path(name).exists():  # path to model data directory
            return load_model_from_path(Path(name), **overrides)
    elif hasattr(name, "exists"):  # Path or Path-like to model data
        return load_model_from_path(name, **overrides)
    raise IOError(Errors.E050.format(name=name))


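# Usage sketch (illustrative only; the package name and override are
# assumptions, not part of this module): a string name resolves to an
# installed package, a path resolves to a model data directory on disk.
#
#     nlp = load_model("en_core_web_sm", disable=["parser"])
#     nlp = load_model(Path("/path/to/model-data"))

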
def load_model_from_package(name, **overrides):
    """Load a model from an installed package."""
    cls = importlib.import_module(name)
    return cls.load(**overrides)


def load_model_from_path(model_path, meta=False, **overrides):
    """Load a model from a data directory path. Creates Language class with
    pipeline from meta.json and then calls from_disk() with path."""
    if not meta:
        meta = get_model_meta(model_path)
    nlp_config = get_model_config(model_path)
    if nlp_config.get("nlp", None):
        return load_model_from_config(nlp_config["nlp"])

    # Support language factories registered via entry points (e.g. custom
    # language subclass) while keeping top-level language identifier "lang"
    lang = meta.get("lang_factory", meta["lang"])
    cls = get_lang_class(lang)
    nlp = cls(meta=meta, **overrides)
    pipeline = meta.get("pipeline", [])
    factories = meta.get("factories", {})
    disable = overrides.get("disable", [])
    if pipeline is True:
        pipeline = nlp.Defaults.pipe_names
    elif pipeline in (False, None):
        pipeline = []
    for name in pipeline:
        if name not in disable:
            config = meta.get("pipeline_args", {}).get(name, {})
            config.update(overrides)
            factory = factories.get(name, name)
            if nlp_config.get(name, None):
                model_config = nlp_config[name]["model"]
                config["model"] = model_config
            component = nlp.create_pipe(factory, config=config)
            nlp.add_pipe(component, name=name)
    return nlp.from_disk(model_path, exclude=disable)


def load_model_from_config(nlp_config):
    if "name" in nlp_config:
        nlp = load_model(**nlp_config)
    elif "lang" in nlp_config:
        lang_class = get_lang_class(nlp_config["lang"])
        nlp = lang_class()
    else:
        raise ValueError(Errors.E993)
    if "pipeline" in nlp_config:
        for name, component_cfg in nlp_config["pipeline"].items():
            factory = component_cfg.pop("factory")
            component = nlp.create_pipe(factory, config=component_cfg)
            nlp.add_pipe(component, name=name)
    return nlp


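# Minimal sketch of the dict this function expects (values are hypothetical):
# either a "name" key that is forwarded to load_model(), or a "lang" key plus
# an optional "pipeline" section mapping component names to factory configs.
#
#     nlp_config = {
#         "lang": "en",
#         "pipeline": {"tagger": {"factory": "tagger"}},
#     }
#     nlp = load_model_from_config(nlp_config)

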
def load_model_from_init_py(init_file, **overrides):
    """Helper function to use in the `load()` method of a model package's
    __init__.py.

    init_file (unicode): Path to model's __init__.py, i.e. `__file__`.
    **overrides: Specific overrides, like pipeline components to disable.
    RETURNS (Language): `Language` class with loaded model.
    """
    model_path = Path(init_file).parent
    meta = get_model_meta(model_path)
    data_dir = f"{meta['lang']}_{meta['name']}-{meta['version']}"
    data_path = model_path / data_dir
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=data_path))
    return load_model_from_path(data_path, meta, **overrides)


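# Sketch of how a model package's __init__.py typically calls this helper
# (the surrounding package is hypothetical):
#
#     # <package>/__init__.py
#     from spacy.util import load_model_from_init_py
#
#     def load(**overrides):
#         return load_model_from_init_py(__file__, **overrides)

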
def get_installed_models():
    """List all model packages currently installed in the environment.

    RETURNS (list): The string names of the models.
    """
    return list(registry.models.get_all().keys())


def get_package_version(name):
    """Get the version of an installed package. Typically used to get model
    package versions.

    name (unicode): The name of the installed Python package.
    RETURNS (unicode / None): The version or None if package not installed.
    """
    try:
        return importlib_metadata.version(name)
    except importlib_metadata.PackageNotFoundError:
        return None


def split_version(version):
    """RETURNS (tuple): Two integers, the major and minor spaCy version."""
    pieces = version.split(".", 3)
    return int(pieces[0]), int(pieces[1])


def is_compatible_model(meta):
    """Check if a model is compatible with the current version of spaCy, based
    on its meta.json. We compare the version of spaCy the model was created
    with to the current version. If the minor version is different, it's
    considered incompatible.

    meta (dict): The model's meta.
    RETURNS (bool / None): Whether the model is compatible with the current
        spaCy or None if we don't have enough info.
    """
    cur_v = about.__version__
    pkg_v = meta.get("spacy_version")
    if not pkg_v or not isinstance(pkg_v, str):
        return None
    # Handle spacy_version values like >=x,<y, just in case
    pkg_v = re.sub(r"[^0-9.]", "", pkg_v.split(",")[0])
    cur_major, cur_minor = split_version(cur_v)
    pkg_major, pkg_minor = split_version(pkg_v)
    if cur_major != pkg_major or cur_minor != pkg_minor:
        return False
    return True


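# Worked example (all values are made up): with spaCy 2.2.4 installed,
# split_version("2.2.4") == (2, 2). A model whose meta.json contains
# "spacy_version": ">=2.2.0,<2.3.0" is reduced to "2.2.0" by the regex above
# and compares equal on (major, minor), so is_compatible_model(meta) returns
# True; a model built against 2.1.x would return False.

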
def load_config(path, create_objects=False):
    """Load a Thinc-formatted config file, optionally filling in objects where
    the config references registry entries. See "Thinc config files" for details.

    path (unicode or Path): Path to the config file
    create_objects (bool): Whether to automatically create objects when the config
        references registry entries. Defaults to False.

    RETURNS (dict): The objects from the config file.
    """
    config = thinc.config.Config().from_disk(path)
    if create_objects:
        return registry.make_from_config(config, validate=True)
    else:
        return config


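# Usage sketch (the file name is hypothetical): with create_objects=False the
# raw nested Config mapping is returned; with create_objects=True any registry
# references in the config are resolved into the objects they describe.
#
#     raw_cfg = load_config("training.cfg")
#     built = load_config("training.cfg", create_objects=True)

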
def load_config_from_str(string, create_objects=False):
    """Load a Thinc-formatted config, optionally filling in objects where
    the config references registry entries. See "Thinc config files" for details.

    string (unicode or Path): Text contents of the config file.
    create_objects (bool): Whether to automatically create objects when the config
        references registry entries. Defaults to False.

    RETURNS (dict): The objects from the config file.
    """
    config = thinc.config.Config().from_str(string)
    if create_objects:
        return registry.make_from_config(config, validate=True)
    else:
        return config


def get_model_meta(path):
    """Get model meta.json from a directory path and validate its contents.

    path (unicode or Path): Path to model directory.
    RETURNS (dict): The model's meta data.
    """
    model_path = ensure_path(path)
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=model_path))
    meta_path = model_path / "meta.json"
    if not meta_path.is_file():
        raise IOError(Errors.E053.format(path=meta_path, name="meta.json"))
    meta = srsly.read_json(meta_path)
    for setting in ["lang", "name", "version"]:
        if setting not in meta or not meta[setting]:
            raise ValueError(Errors.E054.format(setting=setting))
    return meta


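# Minimal meta.json that passes the validation above (values are placeholders):
#
#     {
#         "lang": "en",
#         "name": "example_model",
#         "version": "0.0.1"
#     }
#
# Anything missing or empty among "lang", "name" and "version" raises E054.

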
def get_model_config(path):
    """Get the model's config from a directory path.

    path (unicode or Path): Path to model directory.
    RETURNS (Config): The model's config data.
    """
    model_path = ensure_path(path)
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=model_path))
    config_path = model_path / "config.cfg"
    # Model directories are allowed not to have config files (for now?)
    if not config_path.is_file():
        return Config({})
        # raise IOError(Errors.E053.format(path=config_path, name="config.cfg"))
    return Config().from_disk(config_path)


def is_package(name):
    """Check if string maps to a package installed via pip.

    name (unicode): Name of package.
    RETURNS (bool): True if installed package, False if not.
    """
    try:
        importlib_metadata.distribution(name)
        return True
    except:  # noqa: E722
        return False


def get_package_path(name):
    """Get the path to an installed package.

    name (unicode): Package name.
    RETURNS (Path): Path to installed package.
    """
    name = name.lower()  # use lowercase version to be safe
    # Here we're importing the module just to find it. This is worryingly
    # indirect, but it's otherwise very difficult to find the package.
    pkg = importlib.import_module(name)
    return Path(pkg.__file__).parent


def is_in_jupyter():
    """Check if user is running spaCy from a Jupyter notebook by detecting the
    IPython kernel. Mainly used for the displaCy visualizer.

    RETURNS (bool): True if in Jupyter, False if not.
    """
    # https://stackoverflow.com/a/39662359/6400719
    try:
        shell = get_ipython().__class__.__name__
        if shell == "ZMQInteractiveShell":
            return True  # Jupyter notebook or qtconsole
    except NameError:
        return False  # Probably standard Python interpreter
    return False


def get_component_name(component):
    if hasattr(component, "name"):
        return component.name
    if hasattr(component, "__name__"):
        return component.__name__
    if hasattr(component, "__class__") and hasattr(component.__class__, "__name__"):
        return component.__class__.__name__
    return repr(component)


def get_cuda_stream(require=False, non_blocking=True):
    ops = get_current_ops()
    if CudaStream is None:
        return None
    elif isinstance(ops, NumpyOps):
        return None
    else:
        return CudaStream(non_blocking=non_blocking)


def get_async(stream, numpy_array):
    if cupy is None:
        return numpy_array
    else:
        array = cupy.ndarray(numpy_array.shape, order="C", dtype=numpy_array.dtype)
        array.set(numpy_array, stream=stream)
        return array


def eg2doc(example):
    """Get a Doc object from an Example (or if it's a Doc, use it directly)"""
    # Put the import here to avoid circular import problems
    from .tokens.doc import Doc

    return example if isinstance(example, Doc) else example.doc


def env_opt(name, default=None):
    if type(default) is float:
        type_convert = float
    else:
        type_convert = int
    if "SPACY_" + name.upper() in os.environ:
        value = type_convert(os.environ["SPACY_" + name.upper()])
        if _PRINT_ENV:
            print(name, "=", repr(value), "via", "$SPACY_" + name.upper())
        return value
    elif name in os.environ:
        value = type_convert(os.environ[name])
        if _PRINT_ENV:
            print(name, "=", repr(value), "via", "$" + name)
        return value
    else:
        if _PRINT_ENV:
            print(name, "=", repr(default), "by default")
        return default


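# Usage sketch (the variable name and values are hypothetical): env_opt looks
# for a SPACY_-prefixed variable first, then the bare name, then falls back to
# the default; it converts to float only when the default itself is a float.
#
#     os.environ["SPACY_BATCH_SIZE"] = "128"
#     env_opt("batch_size", default=32)   # -> 128 (int, via $SPACY_BATCH_SIZE)
#     env_opt("dropout", default=0.2)     # -> 0.2 (float default, no env var set)

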
def read_regex(path):
    path = ensure_path(path)
    with path.open(encoding="utf8") as file_:
        entries = file_.read().split("\n")
        expression = "|".join(
            ["^" + re.escape(piece) for piece in entries if piece.strip()]
        )
    return re.compile(expression)


def compile_prefix_regex(entries):
    """Compile a sequence of prefix rules into a regex object.

    entries (tuple): The prefix rules, e.g. spacy.lang.punctuation.TOKENIZER_PREFIXES.
    RETURNS (regex object): The regex object, to be used for Tokenizer.prefix_search.
    """
    if "(" in entries:
        # Handle deprecated data
        expression = "|".join(
            ["^" + re.escape(piece) for piece in entries if piece.strip()]
        )
        return re.compile(expression)
    else:
        expression = "|".join(["^" + piece for piece in entries if piece.strip()])
        return re.compile(expression)


def compile_suffix_regex(entries):
    """Compile a sequence of suffix rules into a regex object.

    entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
    RETURNS (regex object): The regex object, to be used for Tokenizer.suffix_search.
    """
    expression = "|".join([piece + "$" for piece in entries if piece.strip()])
    return re.compile(expression)


def compile_infix_regex(entries):
    """Compile a sequence of infix rules into a regex object.

    entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.
    RETURNS (regex object): The regex object, to be used for Tokenizer.infix_finditer.
    """
    expression = "|".join([piece for piece in entries if piece.strip()])
    return re.compile(expression)


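# Usage sketch (assumes the default punctuation rules; not part of this
# module): the compiled regexes are what Tokenizer expects for its
# prefix_search, suffix_search and infix_finditer arguments.
#
#     from spacy.lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
#
#     prefix_search = compile_prefix_regex(TOKENIZER_PREFIXES).search
#     suffix_search = compile_suffix_regex(TOKENIZER_SUFFIXES).search
#     infix_finditer = compile_infix_regex(TOKENIZER_INFIXES).finditer

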
def add_lookups(default_func, *lookups):
    """Extend an attribute function with special cases. If a word is in the
    lookups, the value is returned. Otherwise the previous function is used.

    default_func (callable): The default function to execute.
    *lookups (dict): Lookup dictionary mapping string to attribute value.
    RETURNS (callable): Lexical attribute getter.
    """
    # This is implemented as functools.partial instead of a closure, to allow
    # pickle to work.
    return functools.partial(_get_attr_unless_lookup, default_func, lookups)


def _get_attr_unless_lookup(default_func, lookups, string):
    for lookup in lookups:
        if string in lookup:
            return lookup[string]
    return default_func(string)


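# Usage sketch (the lookup table below is made up): lookup entries win over the
# default attribute getter, which is only called for strings not in any lookup.
#
#     norm_exceptions = {"gonna": "going to"}
#     get_norm = add_lookups(lambda string: string.lower(), norm_exceptions)
#     get_norm("gonna")   # -> "going to" (from the lookup)
#     get_norm("Hello")   # -> "hello"    (from the default function)

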
def update_exc(base_exceptions, *addition_dicts):
    """Update and validate tokenizer exceptions. Will overwrite exceptions.

    base_exceptions (dict): Base exceptions.
    *addition_dicts (dict): Exceptions to add to the base dict, in order.
    RETURNS (dict): Combined tokenizer exceptions.
    """
    exc = dict(base_exceptions)
    for additions in addition_dicts:
        for orth, token_attrs in additions.items():
            if not all(isinstance(attr[ORTH], str) for attr in token_attrs):
                raise ValueError(Errors.E055.format(key=orth, orths=token_attrs))
            described_orth = "".join(attr[ORTH] for attr in token_attrs)
            if orth != described_orth:
                raise ValueError(Errors.E056.format(key=orth, orths=described_orth))
        exc.update(additions)
    exc = expand_exc(exc, "'", "’")
    return exc


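# Usage sketch (the exception entries are illustrative): each value is a list
# of token dicts whose ORTH pieces must concatenate back to the key, otherwise
# E055/E056 is raised. The apostrophe variants are then added by expand_exc.
#
#     BASE = {"don't": [{ORTH: "do"}, {ORTH: "n't"}]}
#     EXTRA = {"gonna": [{ORTH: "gon"}, {ORTH: "na"}]}
#     TOKENIZER_EXCEPTIONS = update_exc(BASE, EXTRA)

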
def expand_exc(excs, search, replace):
    """Find string in tokenizer exceptions, duplicate entry and replace string.
    For example, to add additional versions with typographic apostrophes.

    excs (dict): Tokenizer exceptions.
    search (unicode): String to find and replace.
    replace (unicode): Replacement.
    RETURNS (dict): Combined tokenizer exceptions.
    """

    def _fix_token(token, search, replace):
        fixed = dict(token)
        fixed[ORTH] = fixed[ORTH].replace(search, replace)
        return fixed

    new_excs = dict(excs)
    for token_string, tokens in excs.items():
        if search in token_string:
            new_key = token_string.replace(search, replace)
            new_value = [_fix_token(t, search, replace) for t in tokens]
|
|
new_excs[new_key] = new_value
|
|
|
|
|
return new_excs


def normalize_slice(length, start, stop, step=None):
    if not (step is None or step == 1):
        raise ValueError(Errors.E057)
    if start is None:
        start = 0
    elif start < 0:
        start += length
    start = min(length, max(0, start))
    if stop is None:
        stop = length
    elif stop < 0:
        stop += length
    stop = min(length, max(start, stop))
    return start, stop
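
# Illustrative usage (editor's sketch): negative and missing bounds are
# resolved against the sequence length, so slices can be clamped safely.
# >>> normalize_slice(5, -2, None)
# (3, 5)
# >>> normalize_slice(5, 0, 99)
# (0, 5)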


def minibatch(items, size=8):
    """Iterate over batches of items. `size` may be an iterator,
    so that batch-size can vary on each step.
    """
    if isinstance(size, int):
        size_ = itertools.repeat(size)
    else:
        size_ = size
    items = iter(items)
    while True:
        batch_size = next(size_)
        batch = list(itertools.islice(items, int(batch_size)))
        if len(batch) == 0:
            break
        yield list(batch)
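
# Illustrative usage (editor's sketch): a fixed batch size simply chunks the
# iterable; the last batch may be smaller.
# >>> [batch for batch in minibatch(range(10), size=4)]
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]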


def compounding(start, stop, compound):
    """Yield an infinite series of compounding values. Each time the
    generator is called, a value is produced by multiplying the previous
    value by the compound rate.

    EXAMPLE:
        >>> sizes = compounding(1., 10., 1.5)
        >>> assert next(sizes) == 1.
        >>> assert next(sizes) == 1 * 1.5
        >>> assert next(sizes) == 1.5 * 1.5
    """

    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)

    curr = float(start)
    while True:
        yield clip(curr)
        curr *= compound
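
# Illustrative usage (editor's sketch): because `minibatch` accepts an
# iterator for `size`, `compounding` can supply a batch size that grows
# gradually during training. `train_data` here is a hypothetical list of
# examples, not a name defined in this module.
# >>> batch_sizes = compounding(4.0, 32.0, 1.001)
# >>> batches = minibatch(train_data, size=batch_sizes)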


def stepping(start, stop, steps):
    """Yield an infinite series of values that step from a start value to a
    final value over some number of steps. Each step is (stop-start)/steps.

    After the final value is reached, the generator continues yielding that
    value.

    EXAMPLE:
        >>> sizes = stepping(1., 200., 100)
        >>> assert next(sizes) == 1.
        >>> assert next(sizes) == 1 + (200.-1.) / 100
        >>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100
    """

    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)

    curr = float(start)
    while True:
        yield clip(curr)
        curr += (stop - start) / steps


def decaying(start, stop, decay):
    """Yield an infinite series of linearly decaying values."""
    curr = float(start)
    while True:
        yield max(curr, stop)
        curr -= decay
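
# Illustrative usage (editor's sketch): values fall linearly by `decay` per
# step and never drop below `stop`, e.g. for a dropout schedule.
# >>> dropout = decaying(0.6, 0.2, 1e-4)
# >>> next(dropout)   # then 0.5999, 0.5998, ... never below 0.2
# 0.6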


def minibatch_by_words(examples, size, tuples=True, count_words=len, tolerance=0.2):
    """Create minibatches of roughly a given number of words. If any examples
    are longer than the specified batch length, they will appear in a batch by
    themselves.

    Note: the `tuples` argument is accepted but currently unused.
    """
    if isinstance(size, int):
        size_ = itertools.repeat(size)
    elif isinstance(size, List):
        size_ = iter(size)
    else:
        size_ = size
    examples = iter(examples)
    oversize = []
    while True:
        batch_size = next(size_)
        # Use the `tolerance` argument rather than a hard-coded factor.
        tol_size = batch_size * tolerance
        batch = []
        if oversize:
            example = oversize.pop(0)
            n_words = count_words(example.doc)
            batch.append(example)
            batch_size -= n_words
        while batch_size >= 1:
            try:
                example = next(examples)
            except StopIteration:
                if batch:
                    yield batch
                return
            n_words = count_words(example.doc)
            if n_words < (batch_size + tol_size):
                batch_size -= n_words
                batch.append(example)
            else:
                oversize.append(example)
        if batch:
            yield batch
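
# Illustrative sketch (editor's addition): items must expose a `.doc`
# attribute that `count_words` can measure; here faked with a namedtuple
# holding token lists, so the default `count_words=len` applies.
# >>> from collections import namedtuple
# >>> Ex = namedtuple("Ex", ["doc"])
# >>> data = [Ex(doc=["tok"] * n) for n in (6, 6, 3, 2)]
# >>> [len(batch) for batch in minibatch_by_words(data, size=10)]
# [3, 1]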


def itershuffle(iterable, bufsize=1000):
    """Shuffle an iterator. This works by holding `bufsize` items back
    and yielding them sometime later. Obviously, this is not unbiased –
    but should be good enough for batching. Larger bufsize means less bias.
    From https://gist.github.com/andres-erbsen/1307752

    iterable (iterable): Iterator to shuffle.
    bufsize (int): Items to hold back.
    YIELDS (iterable): The shuffled iterator.
    """
    iterable = iter(iterable)
    buf = []
    try:
        while True:
            for i in range(random.randint(1, bufsize - len(buf))):
                buf.append(next(iterable))
            random.shuffle(buf)
            for i in range(random.randint(1, bufsize)):
                if buf:
                    yield buf.pop()
                else:
                    break
    except StopIteration:
        random.shuffle(buf)
        while buf:
            yield buf.pop()
        # Re-raising StopIteration inside a generator becomes a RuntimeError
        # under PEP 479 (Python 3.7+), so simply return once the buffer is
        # drained.
        return
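
# Illustrative usage (editor's sketch): order is randomised within a rolling
# buffer, but every item is eventually yielded exactly once.
# >>> sorted(itershuffle(range(10), bufsize=5)) == list(range(10))
# True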


def filter_spans(spans):
    """Filter a sequence of spans and remove duplicates or overlaps. Useful for
    creating named entities (where one token can only be part of one entity) or
    when merging spans with `Retokenizer.merge`. When spans overlap, the (first)
    longest span is preferred over shorter spans.

    spans (iterable): The spans to filter.
    RETURNS (list): The filtered spans.
    """
    get_sort_key = lambda span: (span.end - span.start, -span.start)
    sorted_spans = sorted(spans, key=get_sort_key, reverse=True)
    result = []
    seen_tokens = set()
    for span in sorted_spans:
        # Check for end - 1 here because boundaries are inclusive
        if span.start not in seen_tokens and span.end - 1 not in seen_tokens:
            result.append(span)
            seen_tokens.update(range(span.start, span.end))
    result = sorted(result, key=lambda span: span.start)
    return result
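
# Illustrative usage (editor's sketch, assuming `nlp` is a loaded spaCy
# pipeline): overlapping spans are resolved in favour of the longest one.
# >>> doc = nlp("I like New York in Autumn")
# >>> spans = [doc[2:4], doc[3:5], doc[5:6]]
# >>> [span.text for span in filter_spans(spans)]
# ['New York', 'Autumn']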


def to_bytes(getters, exclude):
    serialized = {}
    for key, getter in getters.items():
        # Split to support file names like meta.json
        if key.split(".")[0] not in exclude:
            serialized[key] = getter()
    return srsly.msgpack_dumps(serialized)


def from_bytes(bytes_data, setters, exclude):
    msg = srsly.msgpack_loads(bytes_data)
    for key, setter in setters.items():
        # Split to support file names like meta.json
        if key.split(".")[0] not in exclude and key in msg:
            setter(msg[key])
    return msg
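
# Illustrative round trip (editor's sketch): getters produce msgpack-able
# values keyed by name, setters consume them again on deserialization.
# >>> received = {}
# >>> data = to_bytes({"meta": lambda: {"lang": "en"}}, exclude=[])
# >>> _ = from_bytes(data, {"meta": received.update}, exclude=[])
# >>> received
# {'lang': 'en'}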


def to_disk(path, writers, exclude):
    path = ensure_path(path)
    if not path.exists():
        path.mkdir()
    for key, writer in writers.items():
        # Split to support file names like meta.json
        if key.split(".")[0] not in exclude:
            writer(path / key)
    return path


def from_disk(path, readers, exclude):
    path = ensure_path(path)
    for key, reader in readers.items():
        # Split to support file names like meta.json
        if key.split(".")[0] not in exclude:
            reader(path / key)
    return path


def import_file(name, loc):
    """Import module from a file. Used to load models from a directory.

    name (unicode): Name of module to load.
    loc (unicode / Path): Path to the file.
    RETURNS: The loaded module.
    """
    loc = str(loc)
    spec = importlib.util.spec_from_file_location(name, str(loc))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
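
# Illustrative usage (editor's sketch; the path is hypothetical): load a
# module object directly from a file on disk.
# >>> mod = import_file("custom_lang", "/path/to/custom_lang.py")
# >>> mod.__name__
# 'custom_lang'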


def minify_html(html):
    """Perform a template-specific, rudimentary HTML minification for displaCy.
    Disclaimer: NOT a general-purpose solution, only removes indentation and
    newlines.

    html (unicode): Markup to minify.
    RETURNS (unicode): "Minified" HTML.
    """
    # Strip four-space indentation and newlines only, per the docstring;
    # removing every single space would mangle the markup.
    return html.strip().replace("    ", "").replace("\n", "")


def escape_html(text):
    """Replace <, >, &, " with their HTML encoded representation. Intended to
    prevent HTML errors in rendered displaCy markup.

    text (unicode): The original text.
    RETURNS (unicode): Equivalent text to be safely used within HTML.
    """
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    text = text.replace('"', "&quot;")
    return text
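
# Illustrative usage (editor's sketch):
# >>> escape_html('1 < 2 & "quotes"')
# '1 &lt; 2 &amp; &quot;quotes&quot;'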


def use_gpu(gpu_id):
    return require_gpu(gpu_id)


def fix_random_seed(seed=0):
    random.seed(seed)
    numpy.random.seed(seed)
    if cupy is not None:
        cupy.random.seed(seed)
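
# Illustrative usage (editor's sketch): reseeding restores reproducible
# random streams across `random`, `numpy` and (if available) `cupy`.
# >>> fix_random_seed(0)
# >>> first = random.random()
# >>> fix_random_seed(0)
# >>> random.random() == first
# True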


def get_serialization_exclude(serializers, exclude, kwargs):
    """Helper function to validate serialization args and manage transition from
    keyword arguments (pre v2.1) to exclude argument.
    """
    exclude = list(exclude)
    # Split to support file names like meta.json
    options = [name.split(".")[0] for name in serializers]
    for key, value in kwargs.items():
        if key in ("vocab",) and value is False:
            warnings.warn(Warnings.W015.format(arg=key), DeprecationWarning)
            exclude.append(key)
        elif key.split(".")[0] in options:
            raise ValueError(Errors.E128.format(arg=key))
        # TODO: user warning?
    return exclude


class SimpleFrozenDict(dict):
    """Simplified implementation of a frozen dict, mainly used as default
    function or method argument (for arguments that should default to empty
    dictionary). Will raise an error if user or spaCy attempts to add to dict.
    """

    def __setitem__(self, key, value):
        raise NotImplementedError(Errors.E095)

    def pop(self, key, default=None):
        raise NotImplementedError(Errors.E095)

    def update(self, other):
        raise NotImplementedError(Errors.E095)
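
# Illustrative usage (editor's sketch): reads work like a normal dict, but
# any attempt to mutate raises the E095 error.
# >>> config = SimpleFrozenDict(foo="bar")
# >>> config["foo"]
# 'bar'
# >>> config["baz"] = 1  # raises NotImplementedError (Errors.E095)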


class DummyTokenizer(object):
    # add dummy methods for to_bytes, from_bytes, to_disk and from_disk to
    # allow serialization (see #1557)
    def to_bytes(self, **kwargs):
        return b""

    def from_bytes(self, _bytes_data, **kwargs):
        return self

    def to_disk(self, _path, **kwargs):
        return None

    def from_disk(self, _path, **kwargs):
        return self


def link_vectors_to_models(vocab):
    vectors = vocab.vectors
    if vectors.name is None:
        vectors.name = VECTORS_KEY
        if vectors.data.size != 0:
            warnings.warn(Warnings.W020.format(shape=vectors.data.shape))
    for word in vocab:
        if word.orth in vectors.key2row:
            word.rank = vectors.key2row[word.orth]
        else:
            word.rank = 0


VECTORS_KEY = "spacy_pretrained_vectors"


def create_default_optimizer():
    learn_rate = env_opt("learn_rate", 0.001)
    beta1 = env_opt("optimizer_B1", 0.9)
    beta2 = env_opt("optimizer_B2", 0.999)
    eps = env_opt("optimizer_eps", 1e-8)
    L2 = env_opt("L2_penalty", 1e-6)
    grad_clip = env_opt("grad_norm_clip", 10.0)
    L2_is_weight_decay = env_opt("L2_is_weight_decay", False)
    optimizer = Adam(
        learn_rate,
        L2=L2,
        beta1=beta1,
        beta2=beta2,
        eps=eps,
        grad_clip=grad_clip,
        L2_is_weight_decay=L2_is_weight_decay,
    )
    return optimizer