from wasabi import Printer

from ...errors import Errors
from ...tokens import Doc, Span
from ...training import iob_to_biluo
from ...util import get_lang_class, load_model
from .. import tags_to_entities


def conll_ner_to_docs(
    input_data, n_sents=10, seg_sents=False, model=None, no_print=False, **kwargs
):
    """
    Convert files in the CoNLL-2003 NER format and similar formats with
    whitespace-separated columns into Doc objects.

    The first column contains the tokens and the final column contains the
    IOB tags. If an additional second column is present, it contains the tags.

    Sentences are separated by blank lines and documents can be separated
    using the line "-DOCSTART- -X- O O".

    Sample format:

    -DOCSTART- -X- O O

    I O
    like O
    London B-GPE
    and O
    New B-GPE
    York I-GPE
    City I-GPE
    . O
    """
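    # Usage sketch (illustrative): the converter is a generator that yields one
    # Doc per document in the input, e.g.
    #   docs = list(conll_ner_to_docs(conll_text, n_sents=10, seg_sents=True))
    # where `conll_text` would be the raw file contents read as a string.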
    msg = Printer(no_print=no_print)
    doc_delimiter = "-DOCSTART- -X- O O"
    # check for existing delimiters, which should be preserved
    if "\n\n" in input_data and seg_sents:
        msg.warn(
            "Sentence boundaries found, automatic sentence segmentation with "
            "`-s` disabled."
        )
        seg_sents = False
    if doc_delimiter in input_data and n_sents:
        msg.warn(
            "Document delimiters found, automatic document segmentation with "
            "`-n` disabled."
        )
        n_sents = 0
    # do document segmentation with existing sentences
    if "\n\n" in input_data and doc_delimiter not in input_data and n_sents:
        n_sents_info(msg, n_sents)
        input_data = segment_docs(input_data, n_sents, doc_delimiter)
    # do sentence segmentation with existing documents
    if "\n\n" not in input_data and doc_delimiter in input_data and seg_sents:
        input_data = segment_sents_and_docs(input_data, 0, "", model=model, msg=msg)
    # do both sentence segmentation and document segmentation according
    # to options
    if "\n\n" not in input_data and doc_delimiter not in input_data:
        # sentence segmentation required for document segmentation
        if n_sents > 0 and not seg_sents:
            msg.warn(
                f"No sentence boundaries found to use with option `-n {n_sents}`. "
                f"Use `-s` to automatically segment sentences or `-n 0` "
                f"to disable."
            )
        else:
            n_sents_info(msg, n_sents)
            input_data = segment_sents_and_docs(
                input_data, n_sents, doc_delimiter, model=model, msg=msg
            )
    # provide warnings for problematic data
    if "\n\n" not in input_data:
        msg.warn(
            "No sentence boundaries found. Use `-s` to automatically segment "
            "sentences."
        )
    if doc_delimiter not in input_data:
        msg.warn(
            "No document delimiters found. Use `-n` to automatically group "
            "sentences into documents."
        )

    if model:
        nlp = load_model(model)
    else:
        nlp = get_lang_class("xx")()
    for conll_doc in input_data.strip().split(doc_delimiter):
        conll_doc = conll_doc.strip()
        if not conll_doc:
            continue
        words = []
        sent_starts = []
        pos_tags = []
        biluo_tags = []
        for conll_sent in conll_doc.split("\n\n"):
            conll_sent = conll_sent.strip()
            if not conll_sent:
                continue
            lines = [line.strip() for line in conll_sent.split("\n") if line.strip()]
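            # transpose the token rows into columns: cols[0] holds the words,
            # cols[-1] the IOB NER tags, and cols[1] the tags when more than
            # two columns are present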
            cols = list(zip(*[line.split() for line in lines]))
            if len(cols) < 2:
                raise ValueError(Errors.E903)
            length = len(cols[0])
            words.extend(cols[0])
            sent_starts.extend([True] + [False] * (length - 1))
            biluo_tags.extend(iob_to_biluo(cols[-1]))
            pos_tags.extend(cols[1] if len(cols) > 2 else ["-"] * length)

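        # assemble one Doc per document: assign tags and sentence starts per
        # token, then convert the accumulated BILUO tags into entity spans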
        doc = Doc(nlp.vocab, words=words)
        for i, token in enumerate(doc):
            token.tag_ = pos_tags[i]
            token.is_sent_start = sent_starts[i]
        entities = tags_to_entities(biluo_tags)
        doc.ents = [Span(doc, start=s, end=e + 1, label=L) for L, s, e in entities]
        yield doc


def segment_sents_and_docs(doc, n_sents, doc_delimiter, model=None, msg=None):
    sentencizer = None
    if model:
        nlp = load_model(model)
        if "parser" in nlp.pipe_names:
            msg.info(f"Segmenting sentences with parser from model '{model}'.")
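            # if the parser gets its features from a shared tok2vec listener,
            # inline a copy of that layer so the parser can run standalone here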
            for name, proc in nlp.pipeline:
                if "parser" in getattr(proc, "listening_components", []):
                    nlp.replace_listeners(name, "parser", ["model.tok2vec"])
            sentencizer = nlp.get_pipe("parser")
    if not sentencizer:
        msg.info(
            "Segmenting sentences with sentencizer. (Use `-b model` for "
            "improved parser-based sentence segmentation.)"
        )
        nlp = get_lang_class("xx")()
        sentencizer = nlp.create_pipe("sentencizer")
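    # build a Doc from the first (token) column only and run the segmenter
    # over it to predict sentence starts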
    lines = doc.strip().split("\n")
    words = [line.strip().split()[0] for line in lines]
    nlpdoc = Doc(nlp.vocab, words=words)
    sentencizer(nlpdoc)
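    # re-emit the original lines, inserting a blank line at each predicted
    # sentence start and a document delimiter every n_sents sentences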
    lines_with_segs = []
    sent_count = 0
    for i, token in enumerate(nlpdoc):
        if token.is_sent_start:
            if n_sents and sent_count % n_sents == 0:
                lines_with_segs.append(doc_delimiter)
            lines_with_segs.append("")
            sent_count += 1
        lines_with_segs.append(lines[i])
    return "\n".join(lines_with_segs)


def segment_docs(input_data, n_sents, doc_delimiter):
    sent_delimiter = "\n\n"
    sents = input_data.split(sent_delimiter)
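    # group the existing sentences into chunks of n_sents and prefix each
    # chunk with the document delimiter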
    docs = [sents[i : i + n_sents] for i in range(0, len(sents), n_sents)]
    input_data = ""
    for doc in docs:
        input_data += sent_delimiter + doc_delimiter
        input_data += sent_delimiter.join(doc)
    return input_data


def n_sents_info(msg, n_sents):
    msg.info(f"Grouping every {n_sents} sentences into a document.")
    if n_sents == 1:
        msg.warn(
            "To generate better training data, you may want to group "
            "sentences into documents with `-n 10`."
        )