# coding: utf8
from __future__ import unicode_literals, print_function

import os
import sys
import textwrap
import random
import functools
import itertools
import importlib
from pathlib import Path
from collections import OrderedDict

import ujson
import pkg_resources
import regex as re
import cytoolz
import numpy.random
from thinc.neural._classes.model import Model
from thinc.neural.ops import NumpyOps

from .symbols import ORTH
from .compat import cupy, CudaStream, path2str, basestring_, input_, unicode_
from .compat import import_file
from .errors import Errors

# Import these directly from Thinc, so that we're sure we always have the
# same version.
from thinc.neural._classes.model import msgpack
from thinc.neural._classes.model import msgpack_numpy


LANGUAGES = {}
_data_path = Path(__file__).parent / 'data'
_PRINT_ENV = False


def set_env_log(value):
    global _PRINT_ENV
    _PRINT_ENV = value


def get_lang_class(lang):
    """Import and load a Language class.

    lang (unicode): Two-letter language code, e.g. 'en'.
    RETURNS (Language): Language class.
    """
    global LANGUAGES
    if lang not in LANGUAGES:
        try:
            module = importlib.import_module('.lang.%s' % lang, 'spacy')
        except ImportError:
            raise ImportError(Errors.E048.format(lang=lang))
        LANGUAGES[lang] = getattr(module, module.__all__[0])
    return LANGUAGES[lang]
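
# A minimal usage sketch (assumes spaCy ships an 'en' language module, as it
# does by default):
#     >>> cls = get_lang_class('en')
#     >>> nlp = cls()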


def set_lang_class(name, cls):
    """Set a custom Language class name that can be loaded via get_lang_class.

    name (unicode): Name of Language class.
    cls (Language): Language class.
    """
    global LANGUAGES
    LANGUAGES[name] = cls


def get_data_path(require_exists=True):
    """Get path to spaCy data directory.

    require_exists (bool): Only return path if it exists, otherwise None.
    RETURNS (Path or None): Data path or None.
    """
    if not require_exists:
        return _data_path
    else:
        return _data_path if _data_path.exists() else None


def set_data_path(path):
    """Set path to spaCy data directory.

    path (unicode or Path): Path to new data directory.
    """
    global _data_path
    _data_path = ensure_path(path)


def ensure_path(path):
    """Ensure string is converted to a Path.

    path: Anything. If string, it's converted to Path.
    RETURNS: Path or original argument.
    """
    if isinstance(path, basestring_):
        return Path(path)
    else:
        return path
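
# A minimal sketch of the intended behaviour on a POSIX system (non-string
# arguments pass through unchanged):
#     >>> ensure_path('/tmp/models')
#     PosixPath('/tmp/models')
#     >>> ensure_path(Path('/tmp/models'))
#     PosixPath('/tmp/models')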


def load_model(name, **overrides):
    """Load a model from a shortcut link, package or data path.

    name (unicode): Package name, shortcut link or model path.
    **overrides: Specific overrides, like pipeline components to disable.
    RETURNS (Language): `Language` class with the loaded model.
    """
    data_path = get_data_path()
    if not data_path or not data_path.exists():
        raise IOError(Errors.E049.format(path=path2str(data_path)))
    if isinstance(name, basestring_):  # in data dir / shortcut
        if name in set([d.name for d in data_path.iterdir()]):
            return load_model_from_link(name, **overrides)
        if is_package(name):  # installed as package
            return load_model_from_package(name, **overrides)
        if Path(name).exists():  # path to model data directory
            return load_model_from_path(Path(name), **overrides)
    elif hasattr(name, 'exists'):  # Path or Path-like to model data
        return load_model_from_path(name, **overrides)
    raise IOError(Errors.E050.format(name=name))
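
# A minimal usage sketch ('en' is an example shortcut link or package name;
# it must be linked or installed for this to work):
#     >>> nlp = load_model('en', disable=['parser'])
#     >>> doc = nlp(u'This is a sentence.')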


def load_model_from_link(name, **overrides):
    """Load a model from a shortcut link, or directory in spaCy data path."""
    path = get_data_path() / name / '__init__.py'
    try:
        cls = import_file(name, path)
    except AttributeError:
        raise IOError(Errors.E051.format(name=name))
    return cls.load(**overrides)


def load_model_from_package(name, **overrides):
    """Load a model from an installed package."""
    cls = importlib.import_module(name)
    return cls.load(**overrides)


def load_model_from_path(model_path, meta=False, **overrides):
    """Load a model from a data directory path. Creates Language class with
    pipeline from meta.json and then calls from_disk() with path."""
    if not meta:
        meta = get_model_meta(model_path)
    cls = get_lang_class(meta['lang'])
    nlp = cls(meta=meta, **overrides)
    pipeline = meta.get('pipeline', [])
    disable = overrides.get('disable', [])
    if pipeline is True:
        pipeline = nlp.Defaults.pipe_names
    elif pipeline in (False, None):
        pipeline = []
    for name in pipeline:
        if name not in disable:
            config = meta.get('pipeline_args', {}).get(name, {})
            component = nlp.create_pipe(name, config=config)
            nlp.add_pipe(component, name=name)
    return nlp.from_disk(model_path)


def load_model_from_init_py(init_file, **overrides):
    """Helper function to use in the `load()` method of a model package's
    __init__.py.

    init_file (unicode): Path to model's __init__.py, i.e. `__file__`.
    **overrides: Specific overrides, like pipeline components to disable.
    RETURNS (Language): `Language` class with loaded model.
    """
    model_path = Path(init_file).parent
    meta = get_model_meta(model_path)
    data_dir = '%s_%s-%s' % (meta['lang'], meta['name'], meta['version'])
    data_path = model_path / data_dir
    # Check the versioned data directory itself, since that's what gets
    # loaded (and what the E052 error message reports).
    if not data_path.exists():
        raise IOError(Errors.E052.format(path=path2str(data_path)))
    return load_model_from_path(data_path, meta, **overrides)


def get_model_meta(path):
    """Get model meta.json from a directory path and validate its contents.

    path (unicode or Path): Path to model directory.
    RETURNS (dict): The model's meta data.
    """
    model_path = ensure_path(path)
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=path2str(model_path)))
    meta_path = model_path / 'meta.json'
    if not meta_path.is_file():
        raise IOError(Errors.E053.format(path=meta_path))
    meta = read_json(meta_path)
    for setting in ['lang', 'name', 'version']:
        if setting not in meta or not meta[setting]:
            raise ValueError(Errors.E054.format(setting=setting))
    return meta


def is_package(name):
    """Check if string maps to a package installed via pip.

    name (unicode): Name of package.
    RETURNS (bool): True if installed package, False if not.
    """
    name = name.lower()  # compare package name against lowercase name
    packages = pkg_resources.working_set.by_key.keys()
    for package in packages:
        if package.lower().replace('-', '_') == name:
            return True
    return False


def get_package_path(name):
    """Get the path to an installed package.

    name (unicode): Package name.
    RETURNS (Path): Path to installed package.
    """
    name = name.lower()  # use lowercase version to be safe
    # Here we're importing the module just to find it. This is worryingly
    # indirect, but it's otherwise very difficult to find the package.
    pkg = importlib.import_module(name)
    return Path(pkg.__file__).parent


def get_entry_points(key):
    """Get registered entry points from other packages for a given key, e.g.
    'spacy_factories' and return them as a dictionary, keyed by name.

    key (unicode): Entry point name.
    RETURNS (dict): Entry points, keyed by name.
    """
    result = {}
    for entry_point in pkg_resources.iter_entry_points(key):
        result[entry_point.name] = entry_point.load()
    return result
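
# A minimal sketch of how a third-party package would register an entry
# point that this function picks up ('snek' and 'snek_factory' are
# hypothetical names, declared in that package's setup.py):
#     entry_points={
#         'spacy_factories': ['snek = snek_factory:create_snek_component'],
#     }
# After installing such a package, get_entry_points('spacy_factories')
# would return {'snek': create_snek_component}.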


def is_in_jupyter():
    """Check if user is running spaCy from a Jupyter notebook by detecting the
    IPython kernel. Mainly used for the displaCy visualizer.

    RETURNS (bool): True if in Jupyter, False if not.
    """
    try:
        # get_ipython() is only defined when running under IPython
        cfg = get_ipython().config
        if cfg['IPKernelApp']['parent_appname'] == 'ipython-notebook':
            return True
    except NameError:
        return False
    return False


def get_cuda_stream(require=False):
    if CudaStream is None:
        return None  # no CUDA support available
    elif isinstance(Model.ops, NumpyOps):
        return None  # models are running on CPU ops
    else:
        return CudaStream()


def get_async(stream, numpy_array):
    if cupy is None:
        return numpy_array
    else:
        array = cupy.ndarray(numpy_array.shape, order='C',
                             dtype=numpy_array.dtype)
        array.set(numpy_array, stream=stream)
        return array


def env_opt(name, default=None):
    if type(default) is float:
        type_convert = float
    else:
        type_convert = int
    if 'SPACY_' + name.upper() in os.environ:
        value = type_convert(os.environ['SPACY_' + name.upper()])
        if _PRINT_ENV:
            print(name, "=", repr(value), "via", "$SPACY_" + name.upper())
        return value
    elif name in os.environ:
        value = type_convert(os.environ[name])
        if _PRINT_ENV:
            print(name, "=", repr(value), "via", '$' + name)
        return value
    else:
        if _PRINT_ENV:
            print(name, '=', repr(default), "by default")
        return default
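
# A minimal usage sketch ('batch_size' and the value 32 are illustrative):
#     >>> os.environ['SPACY_BATCH_SIZE'] = '32'
#     >>> env_opt('batch_size', 16)
#     32
# Note that the default's type drives the conversion: a float default
# converts the environment string with float(), anything else with int().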


def read_regex(path):
    path = ensure_path(path)
    with path.open() as file_:
        entries = file_.read().split('\n')
    expression = '|'.join(['^' + re.escape(piece)
                           for piece in entries if piece.strip()])
    return re.compile(expression)


def compile_prefix_regex(entries):
    if '(' in entries:
        # Handle deprecated data
        expression = '|'.join(['^' + re.escape(piece)
                               for piece in entries if piece.strip()])
        return re.compile(expression)
    else:
        expression = '|'.join(['^' + piece
                               for piece in entries if piece.strip()])
        return re.compile(expression)


def compile_suffix_regex(entries):
    expression = '|'.join([piece + '$' for piece in entries if piece.strip()])
    return re.compile(expression)


def compile_infix_regex(entries):
    expression = '|'.join([piece for piece in entries if piece.strip()])
    return re.compile(expression)
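
# A minimal sketch of how these compilers are used (the entries shown are
# illustrative, not spaCy's full punctuation rules; each entry is already a
# regex piece):
#     >>> prefix_re = compile_prefix_regex([r'\(', '"', r'\$'])
#     >>> prefix_re.search('"Hello').group(0)
#     '"'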


def add_lookups(default_func, *lookups):
    """Extend an attribute function with special cases. If a word is in the
    lookups, the value is returned. Otherwise the previous function is used.

    default_func (callable): The default function to execute.
    *lookups (dict): Lookup dictionary mapping string to attribute value.
    RETURNS (callable): Lexical attribute getter.
    """
    # This is implemented as functools.partial instead of a closure, to allow
    # pickle to work.
    return functools.partial(_get_attr_unless_lookup, default_func, lookups)


def _get_attr_unless_lookup(default_func, lookups, string):
    for lookup in lookups:
        if string in lookup:
            return lookup[string]
    return default_func(string)
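
# A minimal usage sketch (the lowercasing default and the exceptions dict
# are illustrative; note that lambdas, unlike this partial, don't pickle):
#     >>> get_norm = add_lookups(lambda string: string.lower(),
#     ...                        {"Gonna": "going to"})
#     >>> get_norm("Gonna")
#     'going to'
#     >>> get_norm("Hello")
#     'hello'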


def update_exc(base_exceptions, *addition_dicts):
    """Update and validate tokenizer exceptions. Will overwrite exceptions.

    base_exceptions (dict): Base exceptions.
    *addition_dicts (dict): Exceptions to add to the base dict, in order.
    RETURNS (dict): Combined tokenizer exceptions.
    """
    exc = dict(base_exceptions)
    for additions in addition_dicts:
        for orth, token_attrs in additions.items():
            if not all(isinstance(attr[ORTH], unicode_)
                       for attr in token_attrs):
                raise ValueError(Errors.E055.format(key=orth, orths=token_attrs))
            described_orth = ''.join(attr[ORTH] for attr in token_attrs)
            if orth != described_orth:
                raise ValueError(Errors.E056.format(key=orth, orths=described_orth))
        exc.update(additions)
    exc = expand_exc(exc, "'", "’")
    return exc
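
# A minimal usage sketch (the exception entry is illustrative; the ORTH
# values of the subtokens must concatenate to the key, or E056 is raised;
# the typographic-apostrophe variant is added automatically):
#     >>> exc = update_exc({}, {"don't": [{ORTH: "do"}, {ORTH: "n't"}]})
#     >>> sorted(exc.keys())
#     ["don't", 'don’t']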


def expand_exc(excs, search, replace):
    """Find string in tokenizer exceptions, duplicate entry and replace string.
    For example, to add additional versions with typographic apostrophes.

    excs (dict): Tokenizer exceptions.
    search (unicode): String to find and replace.
    replace (unicode): Replacement.
    RETURNS (dict): Combined tokenizer exceptions.
    """
    def _fix_token(token, search, replace):
        fixed = dict(token)
        fixed[ORTH] = fixed[ORTH].replace(search, replace)
        return fixed

    new_excs = dict(excs)
    for token_string, tokens in excs.items():
        if search in token_string:
            new_key = token_string.replace(search, replace)
            new_value = [_fix_token(t, search, replace) for t in tokens]
            new_excs[new_key] = new_value
    return new_excs


def normalize_slice(length, start, stop, step=None):
    if not (step is None or step == 1):
        raise ValueError(Errors.E057)
    if start is None:
        start = 0
    elif start < 0:
        start += length
    start = min(length, max(0, start))
    if stop is None:
        stop = length
    elif stop < 0:
        stop += length
    stop = min(length, max(start, stop))
    return start, stop
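
# A minimal sketch of the clamping behaviour for a sequence of length 5
# (negative indices wrap, out-of-range values clamp to the bounds):
#     >>> normalize_slice(5, -2, None)
#     (3, 5)
#     >>> normalize_slice(5, 2, 100)
#     (2, 5)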


def minibatch(items, size=8):
    """Iterate over batches of items. `size` may be an iterator,
    so that batch-size can vary on each step.
    """
    if isinstance(size, int):
        size_ = itertools.repeat(size)
    else:
        size_ = size
    items = iter(items)
    while True:
        batch_size = next(size_)
        batch = list(cytoolz.take(int(batch_size), items))
        if len(batch) == 0:
            break
        yield list(batch)
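
# A minimal usage sketch, combining minibatch with the compounding schedule
# defined below so the batch size grows over time:
#     >>> batches = minibatch(range(10), size=compounding(2., 4., 1.5))
#     >>> [len(b) for b in batches]
#     [2, 3, 4, 1]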


def compounding(start, stop, compound):
    """Yield an infinite series of compounding values. Each time the
    generator is called, a value is produced by multiplying the previous
    value by the compound rate.

    EXAMPLE:
        >>> sizes = compounding(1., 10., 1.5)
        >>> assert next(sizes) == 1.
        >>> assert next(sizes) == 1 * 1.5
        >>> assert next(sizes) == 1.5 * 1.5
    """
    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)

    curr = float(start)
    while True:
        yield clip(curr)
        curr *= compound


def stepping(start, stop, steps):
    """Yield an infinite series of values that step from a start value to a
    final value over some number of steps. Each step is (stop-start)/steps.

    After the final value is reached, the generator continues yielding that
    value.

    EXAMPLE:
        >>> sizes = stepping(1., 200., 100)
        >>> assert next(sizes) == 1.
        >>> assert next(sizes) == 1 + (200.-1.) / 100
        >>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100
    """
    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)

    curr = float(start)
    while True:
        yield clip(curr)
        curr += (stop - start) / steps


def decaying(start, stop, decay):
    """Yield an infinite series of linearly decaying values."""
    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)

    nr_upd = 1.
    while True:
        yield clip(start * 1. / (1. + decay * nr_upd))
        nr_upd += 1
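
# A minimal sketch of the decay curve (values follow start / (1 + decay * n),
# clipped so they never fall below stop):
#     >>> rates = decaying(1.0, 0.1, 0.5)
#     >>> next(rates)   # 1.0 / (1 + 0.5 * 1)
#     0.6666666666666666
#     >>> next(rates)   # 1.0 / (1 + 0.5 * 2)
#     0.5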


def minibatch_by_words(items, size, tuples=True, count_words=len):
    """Create minibatches of a given number of words."""
    if isinstance(size, int):
        size_ = itertools.repeat(size)
    else:
        size_ = size
    items = iter(items)
    while True:
        batch_size = next(size_)
        batch = []
        while batch_size >= 0:
            try:
                if tuples:
                    doc, gold = next(items)
                else:
                    doc = next(items)
            except StopIteration:
                if batch:
                    yield batch
                return
            batch_size -= count_words(doc)
            if tuples:
                batch.append((doc, gold))
            else:
                batch.append(doc)
        if batch:
            yield batch
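
# A minimal usage sketch using plain strings as stand-ins for Doc objects
# (count_words=len counts their characters here, which is enough to show
# how the word budget is consumed):
#     >>> docs = ['aaaa', 'bb', 'ccc', 'dddddd']
#     >>> [b for b in minibatch_by_words(docs, size=6, tuples=False)]
#     [['aaaa', 'bb', 'ccc'], ['dddddd']]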


def itershuffle(iterable, bufsize=1000):
    """Shuffle an iterator. This works by holding `bufsize` items back
    and yielding them sometime later. Obviously, this is not unbiased –
    but should be good enough for batching. Larger bufsize means less bias.
    From https://gist.github.com/andres-erbsen/1307752

    iterable (iterable): Iterator to shuffle.
    bufsize (int): Items to hold back.
    YIELDS (iterable): The shuffled iterator.
    """
    iterable = iter(iterable)
    buf = []
    try:
        while True:
            for i in range(random.randint(1, bufsize - len(buf))):
                buf.append(next(iterable))
            random.shuffle(buf)
            for i in range(random.randint(1, bufsize)):
                if buf:
                    yield buf.pop()
                else:
                    break
    except StopIteration:
        # The source iterator is exhausted: flush the remaining buffer.
        # (Re-raising StopIteration inside a generator is a RuntimeError
        # under PEP 479, so we simply fall through and return instead.)
        random.shuffle(buf)
        while buf:
            yield buf.pop()


def read_json(location):
    """Open and load JSON from file.

    location (Path): Path to JSON file.
    RETURNS (dict): Loaded JSON content.
    """
    location = ensure_path(location)
    with location.open('r', encoding='utf8') as f:
        return ujson.load(f)


def read_jsonl(file_path):
    """Read a .jsonl file and yield its contents line by line.

    file_path (unicode / Path): The file path.
    YIELDS: The loaded JSON contents of each line.
    """
    with Path(file_path).open('r', encoding='utf8') as f:
        for line in f:
            try:  # hack to handle broken jsonl
                yield ujson.loads(line.strip())
            except ValueError:
                continue


def get_raw_input(description, default=False):
    """Get user input from the command line via raw_input / input.

    description (unicode): Text to display before prompt.
    default (unicode or False/None): Default value to display with prompt.
    RETURNS (unicode): User input.
    """
    additional = ' (default: %s)' % default if default else ''
    prompt = '    %s%s: ' % (description, additional)
    user_input = input_(prompt)
    return user_input


def to_bytes(getters, exclude):
    serialized = OrderedDict()
    for key, getter in getters.items():
        if key not in exclude:
            serialized[key] = getter()
    return msgpack.dumps(serialized, use_bin_type=True)


def from_bytes(bytes_data, setters, exclude):
    msg = msgpack.loads(bytes_data, raw=False)
    for key, setter in setters.items():
        if key not in exclude and key in msg:
            setter(msg[key])
    return msg
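
# A minimal round-trip sketch (getters/setters normally close over the
# object being serialized; here plain dicts stand in):
#     >>> state = {}
#     >>> data = to_bytes({'meta': lambda: {'lang': 'en'}}, exclude=[])
#     >>> msg = from_bytes(data, {'meta': lambda m: state.update(m)},
#     ...                  exclude=[])
#     >>> state
#     {'lang': 'en'}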


def to_disk(path, writers, exclude):
    path = ensure_path(path)
    if not path.exists():
        path.mkdir()
    for key, writer in writers.items():
        if key not in exclude:
            writer(path / key)
    return path


def from_disk(path, readers, exclude):
    path = ensure_path(path)
    for key, reader in readers.items():
        if key not in exclude:
            reader(path / key)
    return path


def print_table(data, title=None):
    """Print data in table format.

    data (dict or list of tuples): Label/value pairs.
    title (unicode or None): Title, will be printed above.
    """
    if isinstance(data, dict):
        data = list(data.items())
    tpl_row = '    {:<15}' * len(data[0])
    table = '\n'.join([tpl_row.format(l, unicode_(v)) for l, v in data])
    if title:
        print('\n    \033[93m{}\033[0m'.format(title))
    print('\n{}\n'.format(table))


def print_markdown(data, title=None):
    """Print data in GitHub-flavoured Markdown format for issues etc.

    data (dict or list of tuples): Label/value pairs.
    title (unicode or None): Title, will be rendered as headline 2.
    """
    def excl_value(value):
        # contains path, i.e. personal info
        return isinstance(value, basestring_) and Path(value).exists()

    if isinstance(data, dict):
        data = list(data.items())
    markdown = ["* **{}:** {}".format(l, unicode_(v))
                for l, v in data if not excl_value(v)]
    if title:
        print("\n## {}".format(title))
    print('\n{}\n'.format('\n'.join(markdown)))


def prints(*texts, **kwargs):
    """Print formatted message (manual ANSI escape sequences to avoid
    dependency)

    *texts (unicode): Texts to print. Each argument is rendered as paragraph.
    **kwargs: 'title' becomes a coloured headline. 'exits' performs a system
        exit with the given code after printing.
    """
    exits = kwargs.get('exits', None)
    title = kwargs.get('title', None)
    title = '\033[93m{}\033[0m\n'.format(_wrap(title)) if title else ''
    message = '\n\n'.join([_wrap(text) for text in texts])
    print('\n{}{}\n'.format(title, message))
    if exits is not None:
        sys.exit(exits)


def _wrap(text, wrap_max=80, indent=4):
    """Wrap text at given width using textwrap module.

    text (unicode): Text to wrap. If it's a Path, it's converted to string.
    wrap_max (int): Maximum line length (indent is deducted).
    indent (int): Number of spaces for indentation.
    RETURNS (unicode): Wrapped text.
    """
    indent = indent * ' '
    wrap_width = wrap_max - len(indent)
    if isinstance(text, Path):
        text = path2str(text)
    return textwrap.fill(text, width=wrap_width, initial_indent=indent,
                         subsequent_indent=indent, break_long_words=False,
                         break_on_hyphens=False)


def minify_html(html):
    """Perform a template-specific, rudimentary HTML minification for displaCy.
    Disclaimer: NOT a general-purpose solution, only removes indentation and
    newlines.

    html (unicode): Markup to minify.
    RETURNS (unicode): "Minified" HTML.
    """
    # Remove four-space indentation units, not single spaces, so text
    # content inside the markup is left intact.
    return html.strip().replace('    ', '').replace('\n', '')


def escape_html(text):
    """Replace <, >, &, " with their HTML encoded representation. Intended to
    prevent HTML errors in rendered displaCy markup.

    text (unicode): The original text.
    RETURNS (unicode): Equivalent text to be safely used within HTML.
    """
    # The ampersand must be escaped first, so it doesn't double-escape the
    # entities inserted by the later replacements.
    text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
    text = text.replace('"', '&quot;')
    return text
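
# A minimal usage sketch:
#     >>> escape_html(u'1 < 2 & "quoted"')
#     '1 &lt; 2 &amp; &quot;quoted&quot;'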


def use_gpu(gpu_id):
    try:
        import cupy.cuda.device
    except ImportError:
        return None
    from thinc.neural.ops import CupyOps
    device = cupy.cuda.device.Device(gpu_id)
    device.use()
    Model.ops = CupyOps()
    Model.Ops = CupyOps
    return device


def fix_random_seed(seed=0):
    random.seed(seed)
    numpy.random.seed(seed)


class SimpleFrozenDict(dict):
    """Simplified implementation of a frozen dict, mainly used as default
    function or method argument (for arguments that should default to empty
    dictionary). Will raise an error if user or spaCy attempts to add to dict.
    """
    def __setitem__(self, key, value):
        raise NotImplementedError(Errors.E095)

    def pop(self, key, default=None):
        raise NotImplementedError(Errors.E095)

    def update(self, other):
        raise NotImplementedError(Errors.E095)
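
# A minimal sketch of the intended use, as a safe default argument (a plain
# mutable default like `config={}` is shared across calls; mutating this
# frozen stand-in raises instead, surfacing the misuse):
#     >>> def make_component(config=SimpleFrozenDict()):
#     ...     return dict(config)
#     >>> SimpleFrozenDict()['foo'] = 'bar'
#     Traceback (most recent call last):
#         ...
#     NotImplementedError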