2019-02-10 11:14:51 +00:00
|
|
|
# coding: utf8
|
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2019-07-08 15:28:28 +00:00
|
|
|
from collections import defaultdict, OrderedDict
|
2019-02-10 11:14:51 +00:00
|
|
|
import srsly
|
|
|
|
|
2019-10-27 12:35:49 +00:00
|
|
|
from ..language import component
|
2019-02-10 11:14:51 +00:00
|
|
|
from ..errors import Errors
|
|
|
|
from ..compat import basestring_
|
2019-07-08 15:28:28 +00:00
|
|
|
from ..util import ensure_path, to_disk, from_disk
|
2020-02-16 17:17:47 +00:00
|
|
|
from ..tokens import Doc, Span
|
2019-02-10 11:14:51 +00:00
|
|
|
from ..matcher import Matcher, PhraseMatcher
|
|
|
|
|
2019-07-10 10:03:05 +00:00
|
|
|
# Separator used to join an entity label with its pattern ID into a single
# match key (e.g. "ORG||acme"), so both survive a round trip through the
# Matcher / PhraseMatcher, which only store one string key per pattern.
DEFAULT_ENT_ID_SEP = "||"
|
2019-07-08 15:28:28 +00:00
|
|
|
|
2019-02-10 11:14:51 +00:00
|
|
|
|
2019-10-27 12:35:49 +00:00
|
|
|
@component("entity_ruler", assigns=["doc.ents", "token.ent_type", "token.ent_iob"])
class EntityRuler(object):
    """The EntityRuler lets you add spans to the `Doc.ents` using token-based
    rules or exact phrase matches. It can be combined with the statistical
    `EntityRecognizer` to boost accuracy, or used on its own to implement a
    purely rule-based entity recognition system. After initialization, the
    component is typically added to the pipeline using `nlp.add_pipe`.

    DOCS: https://spacy.io/api/entityruler
    USAGE: https://spacy.io/usage/rule-based-matching#entityruler
    """

    def __init__(self, nlp, phrase_matcher_attr=None, validate=False, **cfg):
        """Initialize the entity ruler. If patterns are supplied here, they
        need to be a list of dictionaries with a `"label"` and `"pattern"`
        key. A pattern can either be a token pattern (list) or a phrase pattern
        (string). For example: `{'label': 'ORG', 'pattern': 'Apple'}`.

        nlp (Language): The shared nlp object to pass the vocab to the matchers
            and process phrase patterns.
        phrase_matcher_attr (int / unicode): Token attribute to match on, passed
            to the internal PhraseMatcher as `attr`.
        validate (bool): Whether patterns should be validated, passed to
            Matcher and PhraseMatcher as `validate`.
        patterns (iterable): Optional patterns to load in.
        overwrite_ents (bool): If existing entities are present, e.g. entities
            added by the model, overwrite them by matches if necessary.
        **cfg: Other config parameters. If pipeline component is loaded as part
            of a model pipeline, this will include all keyword arguments passed
            to `spacy.load`.
        RETURNS (EntityRuler): The newly constructed object.

        DOCS: https://spacy.io/api/entityruler#init
        """
        self.nlp = nlp
        self.overwrite = cfg.get("overwrite_ents", False)
        self.token_patterns = defaultdict(list)
        self.phrase_patterns = defaultdict(list)
        self.matcher = Matcher(nlp.vocab, validate=validate)
        if phrase_matcher_attr is not None:
            # "TEXT" is accepted as an alias for the verbatim token text.
            if phrase_matcher_attr.upper() == "TEXT":
                phrase_matcher_attr = "ORTH"
            self.phrase_matcher_attr = phrase_matcher_attr
            self.phrase_matcher = PhraseMatcher(
                nlp.vocab, attr=self.phrase_matcher_attr, validate=validate
            )
        else:
            self.phrase_matcher_attr = None
            self.phrase_matcher = PhraseMatcher(nlp.vocab, validate=validate)
        self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
        # Maps normalized matcher keys of combined "label||id" labels to the
        # (label, ent_id) pair, so `__call__` can recover both pieces.
        self._ent_ids = defaultdict(dict)
        patterns = cfg.get("patterns")
        if patterns is not None:
            self.add_patterns(patterns)

    @classmethod
    def from_nlp(cls, nlp, **cfg):
        """Factory used by `Language` to construct the component."""
        return cls(nlp, **cfg)

    def __len__(self):
        """The number of all patterns added to the entity ruler."""
        n_token_patterns = sum(len(p) for p in self.token_patterns.values())
        n_phrase_patterns = sum(len(p) for p in self.phrase_patterns.values())
        return n_token_patterns + n_phrase_patterns

    def __contains__(self, label):
        """Whether a label is present in the patterns."""
        return label in self.token_patterns or label in self.phrase_patterns

    def __call__(self, doc):
        """Find matches in document and add them as entities.

        doc (Doc): The Doc object in the pipeline.
        RETURNS (Doc): The Doc with added entities, if available.

        DOCS: https://spacy.io/api/entityruler#call
        """
        matches = list(self.matcher(doc)) + list(self.phrase_matcher(doc))
        # Deduplicate and drop zero-length matches.
        matches = set(
            [(m_id, start, end) for m_id, start, end in matches if start != end]
        )
        # Prefer longer matches, then earlier ones, for greedy overlap filtering.
        get_sort_key = lambda m: (m[2] - m[1], m[1])
        matches = sorted(matches, key=get_sort_key, reverse=True)
        entities = list(doc.ents)
        new_entities = []
        seen_tokens = set()
        for match_id, start, end in matches:
            if any(t.ent_type for t in doc[start:end]) and not self.overwrite:
                continue
            # check for end - 1 here because boundaries are inclusive
            if start not in seen_tokens and end - 1 not in seen_tokens:
                if match_id in self._ent_ids:
                    # Pattern had an "id": split the combined key back into
                    # label and entity ID.
                    label, ent_id = self._ent_ids[match_id]
                    span = Span(doc, start, end, label=label)
                    if ent_id:
                        for token in span:
                            token.ent_id_ = ent_id
                else:
                    span = Span(doc, start, end, label=match_id)
                new_entities.append(span)
                # Remove any pre-existing entities that overlap the new span.
                entities = [
                    e for e in entities if not (e.start < end and e.end > start)
                ]
                seen_tokens.update(range(start, end))
        doc.ents = entities + new_entities
        return doc

    @property
    def labels(self):
        """All labels present in the match patterns.

        RETURNS (tuple): The string labels.

        DOCS: https://spacy.io/api/entityruler#labels
        """
        keys = set(self.token_patterns.keys())
        keys.update(self.phrase_patterns.keys())
        all_labels = set()
        for l in keys:
            if self.ent_id_sep in l:
                # Combined "label||id" key: only report the label part.
                label, _ = self._split_label(l)
                all_labels.add(label)
            else:
                all_labels.add(l)
        return tuple(all_labels)

    @property
    def ent_ids(self):
        """All entity ids present in the match patterns `id` properties.

        RETURNS (tuple): The string entity ids.

        DOCS: https://spacy.io/api/entityruler#ent_ids
        """
        keys = set(self.token_patterns.keys())
        keys.update(self.phrase_patterns.keys())
        all_ent_ids = set()
        for l in keys:
            if self.ent_id_sep in l:
                _, ent_id = self._split_label(l)
                all_ent_ids.add(ent_id)
        return tuple(all_ent_ids)

    @property
    def patterns(self):
        """Get all patterns that were added to the entity ruler.

        RETURNS (list): The original patterns, one dictionary per pattern.

        DOCS: https://spacy.io/api/entityruler#patterns
        """
        all_patterns = []
        for label, patterns in self.token_patterns.items():
            for pattern in patterns:
                ent_label, ent_id = self._split_label(label)
                p = {"label": ent_label, "pattern": pattern}
                if ent_id:
                    p["id"] = ent_id
                all_patterns.append(p)
        for label, patterns in self.phrase_patterns.items():
            for pattern in patterns:
                ent_label, ent_id = self._split_label(label)
                # Phrase patterns are stored as Doc objects; serialize the text.
                p = {"label": ent_label, "pattern": pattern.text}
                if ent_id:
                    p["id"] = ent_id
                all_patterns.append(p)
        return all_patterns

    def add_patterns(self, patterns):
        """Add patterns to the entity ruler. A pattern can either be a token
        pattern (list of dicts) or a phrase pattern (string). For example:
        {'label': 'ORG', 'pattern': 'Apple'}
        {'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}

        patterns (list): The patterns to add.

        DOCS: https://spacy.io/api/entityruler#add_patterns
        """
        # disable the nlp components after this one in case they hadn't been initialized / deserialised yet
        try:
            current_index = self.nlp.pipe_names.index(self.name)
            subsequent_pipes = list(self.nlp.pipe_names[current_index + 1 :])
        except ValueError:
            subsequent_pipes = []
        with self.nlp.disable_pipes(subsequent_pipes):
            token_patterns = []
            phrase_pattern_labels = []
            phrase_pattern_texts = []
            phrase_pattern_ids = []
            for entry in patterns:
                if isinstance(entry["pattern"], basestring_):
                    phrase_pattern_labels.append(entry["label"])
                    phrase_pattern_texts.append(entry["pattern"])
                    phrase_pattern_ids.append(entry.get("id"))
                elif isinstance(entry["pattern"], list):
                    token_patterns.append(entry)
            # Tokenize all phrase patterns in one batch via nlp.pipe for speed.
            phrase_patterns = []
            for label, pattern, ent_id in zip(
                phrase_pattern_labels,
                self.nlp.pipe(phrase_pattern_texts),
                phrase_pattern_ids,
            ):
                # Only set "id" when one was given; an unconditional
                # `"id": ent_id` would leave `"id": None` entries that pass
                # the `"id" in entry` check below and pollute self._ent_ids.
                phrase_pattern = {"label": label, "pattern": pattern}
                if ent_id:
                    phrase_pattern["id"] = ent_id
                phrase_patterns.append(phrase_pattern)
            for entry in token_patterns + phrase_patterns:
                label = entry["label"]
                if "id" in entry:
                    # Combine label and id into one matcher key and remember
                    # the mapping so __call__ can split them apart again.
                    ent_label = label
                    label = self._create_label(label, entry["id"])
                    key = self.matcher._normalize_key(label)
                    self._ent_ids[key] = (ent_label, entry["id"])
                pattern = entry["pattern"]
                if isinstance(pattern, Doc):
                    self.phrase_patterns[label].append(pattern)
                elif isinstance(pattern, list):
                    self.token_patterns[label].append(pattern)
                else:
                    raise ValueError(Errors.E097.format(pattern=pattern))
            for label, patterns in self.token_patterns.items():
                self.matcher.add(label, patterns)
            for label, patterns in self.phrase_patterns.items():
                self.phrase_matcher.add(label, patterns)

    def _split_label(self, label):
        """Split Entity label into ent_label and ent_id if it contains self.ent_id_sep

        label (str): The value of label in a pattern entry
        RETURNS (tuple): ent_label, ent_id
        """
        if self.ent_id_sep in label:
            ent_label, ent_id = label.rsplit(self.ent_id_sep, 1)
        else:
            ent_label = label
            ent_id = None
        return ent_label, ent_id

    def _create_label(self, label, ent_id):
        """Join Entity label with ent_id if the pattern has an `id` attribute

        label (str): The label to set for ent.label_
        ent_id (str): The entity ID
        RETURNS (str): The ent_label joined with configured `ent_id_sep`
        """
        if isinstance(ent_id, basestring_):
            label = "{}{}{}".format(label, self.ent_id_sep, ent_id)
        return label

    def from_bytes(self, patterns_bytes, **kwargs):
        """Load the entity ruler from a bytestring.

        patterns_bytes (bytes): The bytestring to load.
        **kwargs: Other config parameters, mostly for consistency.
        RETURNS (EntityRuler): The loaded entity ruler.

        DOCS: https://spacy.io/api/entityruler#from_bytes
        """
        cfg = srsly.msgpack_loads(patterns_bytes)
        if isinstance(cfg, dict):
            # Restore the configuration *before* adding patterns, so patterns
            # are keyed with the right `ent_id_sep` and loaded into a
            # PhraseMatcher configured with the right `attr`. Adding first and
            # then replacing the phrase matcher would silently discard all
            # phrase patterns from matching.
            self.overwrite = cfg.get("overwrite", False)
            self.phrase_matcher_attr = cfg.get("phrase_matcher_attr", None)
            self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
            if self.phrase_matcher_attr is not None:
                self.phrase_matcher = PhraseMatcher(
                    self.nlp.vocab, attr=self.phrase_matcher_attr
                )
            self.add_patterns(cfg.get("patterns", cfg))
        else:
            # Backwards compatibility: a bare list of patterns.
            self.add_patterns(cfg)
        return self

    def to_bytes(self, **kwargs):
        """Serialize the entity ruler patterns to a bytestring.

        RETURNS (bytes): The serialized patterns.

        DOCS: https://spacy.io/api/entityruler#to_bytes
        """
        serial = OrderedDict(
            (
                ("overwrite", self.overwrite),
                ("ent_id_sep", self.ent_id_sep),
                ("phrase_matcher_attr", self.phrase_matcher_attr),
                ("patterns", self.patterns),
            )
        )
        return srsly.msgpack_dumps(serial)

    def from_disk(self, path, **kwargs):
        """Load the entity ruler from a file. Expects a file containing
        newline-delimited JSON (JSONL) with one entry per line.

        path (unicode / Path): The JSONL file to load.
        **kwargs: Other config parameters, mostly for consistency.
        RETURNS (EntityRuler): The loaded entity ruler.

        DOCS: https://spacy.io/api/entityruler#from_disk
        """
        path = ensure_path(path)
        depr_patterns_path = path.with_suffix(".jsonl")
        if depr_patterns_path.is_file():
            # Deprecated single-file JSONL format: patterns only, no config.
            patterns = srsly.read_jsonl(depr_patterns_path)
            self.add_patterns(patterns)
        else:
            cfg = {}
            deserializers_patterns = {
                "patterns": lambda p: self.add_patterns(
                    srsly.read_jsonl(p.with_suffix(".jsonl"))
                )
            }
            deserializers_cfg = {"cfg": lambda p: cfg.update(srsly.read_json(p))}
            # Read config first so the patterns are added with the right
            # `ent_id_sep` and phrase matcher `attr`.
            from_disk(path, deserializers_cfg, {})
            self.overwrite = cfg.get("overwrite", False)
            self.phrase_matcher_attr = cfg.get("phrase_matcher_attr")
            self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
            if self.phrase_matcher_attr is not None:
                self.phrase_matcher = PhraseMatcher(
                    self.nlp.vocab, attr=self.phrase_matcher_attr
                )
            from_disk(path, deserializers_patterns, {})
        return self

    def to_disk(self, path, **kwargs):
        """Save the entity ruler patterns to a directory. The patterns will be
        saved as newline-delimited JSON (JSONL).

        path (unicode / Path): The JSONL file to save.
        **kwargs: Other config parameters, mostly for consistency.

        DOCS: https://spacy.io/api/entityruler#to_disk
        """
        path = ensure_path(path)
        cfg = {
            "overwrite": self.overwrite,
            "phrase_matcher_attr": self.phrase_matcher_attr,
            "ent_id_sep": self.ent_id_sep,
        }
        serializers = {
            "patterns": lambda p: srsly.write_jsonl(
                p.with_suffix(".jsonl"), self.patterns
            ),
            "cfg": lambda p: srsly.write_json(p, cfg),
        }
        if path.suffix == ".jsonl":  # user wants to save only JSONL
            srsly.write_jsonl(path, self.patterns)
        else:
            to_disk(path, serializers, {})
|