# coding: utf8
from __future__ import unicode_literals

from .._messages import Messages
from ...compat import json_dumps, path2str
from ...util import prints
from ...gold import iob_to_biluo

import re


def conllu2json(input_path, output_path, n_sents=10, use_morphology=False, lang=None):
    """
    Convert CoNLL-U files into JSON format for use with the train CLI.
    The use_morphology parameter enables appending morphology to tags, which
    is useful for languages such as Spanish, where UD tags are not so rich.

    NER tags are extracted if available and converted so that they follow
    BILUO and the Wikipedia scheme (PER, LOC, ORG, MISC).
    """
    # Conversion by @dvsrepo, via #11 explosion/spacy-dev-resources
    # NER extraction by @katarkor
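    # For reference, the emitted structure follows spaCy's JSON training
    # format; a converted document looks roughly like this (values are
    # illustrative, not taken from any real corpus):
    #
    #     {"id": 0, "paragraphs": [{"sentences": [{"tokens": [
    #         {"id": 0, "orth": "Madrid", "tag": "PROPN", "head": 1,
    #          "dep": "nsubj", "ner": "U-LOC"}, ...]}]}]}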
    docs = []
    sentences = []
    conll_tuples = read_conllx(input_path, use_morphology=use_morphology)
    checked_for_ner = False
    has_ner_tags = False

    for i, (raw_text, tokens) in enumerate(conll_tuples):
        sentence, brackets = tokens[0]
        if not checked_for_ner:
            # The IOB tags live in the sixth field of the column tuples
            has_ner_tags = is_ner(sentence[5][0])
            checked_for_ner = True
        sentences.append(generate_sentence(sentence, has_ner_tags))
        # Real-sized documents could be extracted using the comments on the
        # CoNLL-U document
        if len(sentences) % n_sents == 0:
            doc = create_doc(sentences, i)
            docs.append(doc)
            sentences = []

    # Flush any leftover sentences that didn't fill a complete batch
    if sentences:
        docs.append(create_doc(sentences, i))

    # Replace ".conllu" before ".conll" so "foo.conllu" becomes "foo.json",
    # not "foo.jsonu"
    output_filename = input_path.parts[-1].replace(
        ".conllu", ".json").replace(".conll", ".json")
    output_file = output_path / output_filename
    with output_file.open('w', encoding='utf-8') as f:
        f.write(json_dumps(docs))
    prints(Messages.M033.format(n_docs=len(docs)),
           title=Messages.M032.format(name=path2str(output_file)))
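
# A minimal usage sketch, assuming pathlib.Path arguments as supplied by the
# CLI wrapper (the file and directory names here are hypothetical):
#
#     from pathlib import Path
#     conllu2json(Path("es_ancora-ud-train.conllu"), Path("converted"),
#                 n_sents=10, use_morphology=True)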


def is_ner(tag):
    """
    Check the 10th column of the first token to determine whether the file
    contains NER tags.
    """
    tag_match = re.match('([A-Z_]+)-([A-Z_]+)', tag)
    if tag_match:
        return True
    elif tag == "O":
        return True
    else:
        return False
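
# Illustrative behaviour of is_ner() on typical 10th-column values (the
# inputs below are examples, not drawn from a specific corpus):
#
#     is_ner('B-GPE_LOC')  # True  -- IOB-prefixed NER tag
#     is_ner('O')          # True  -- outside-entity marker still counts
#     is_ner('_')          # False -- column carries no NER information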


def read_conllx(input_path, use_morphology=False, n=0):
    text = input_path.open('r', encoding='utf-8').read()
    i = 0
    for sent in text.strip().split('\n\n'):
        lines = sent.strip().split('\n')
        if lines:
            # Skip leading comment lines such as "# sent_id = ..."
            while lines and lines[0].startswith('#'):
                lines.pop(0)
            tokens = []
            for line in lines:
                parts = line.split('\t')
                id_, word, lemma, pos, tag, morph, head, dep, _1, iob = parts
                if '-' in id_ or '.' in id_:
                    # Skip multi-word tokens and empty nodes
                    continue
                try:
                    id_ = int(id_) - 1
                    head = (int(head) - 1) if head != '0' else id_
                    dep = 'ROOT' if dep == 'root' else dep
                    tag = pos if tag == '_' else tag
                    tag = tag + '__' + morph if use_morphology else tag
                    tokens.append((id_, word, tag, head, dep, iob))
                except ValueError:
                    print(line)
                    raise
            tuples = [list(t) for t in zip(*tokens)]
            yield (None, [[tuples, []]])
            i += 1
            if n >= 1 and i >= n:
                break
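
# Each token line that read_conllx() consumes carries the ten tab-separated
# CoNLL-U columns, unpacked above as id_, word, lemma, pos, tag, morph,
# head, dep, _1, iob. A made-up example line (tabs rendered as " | "):
#
#     1 | Madrid | Madrid | PROPN | PROPN | Gender=Fem | 2 | nsubj | _ | B-GPE_LOC
#
# The tenth column, normally MISC in plain UD data, is assumed here to hold
# the IOB NER tag.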


def simplify_tags(iob):
    """
    Simplify tags obtained from the dataset in order to follow the Wikipedia
    scheme (PER, LOC, ORG, MISC). 'PER', 'LOC' and 'ORG' keep their tags,
    while 'GPE_LOC' is simplified to 'LOC', 'GPE_ORG' to 'ORG' and all
    remaining tags to 'MISC'.
    """
    new_iob = []
    for tag in iob:
        tag_match = re.match('([A-Z_]+)-([A-Z_]+)', tag)
        if tag_match:
            prefix = tag_match.group(1)
            suffix = tag_match.group(2)
            if suffix == 'GPE_LOC':
                suffix = 'LOC'
            elif suffix == 'GPE_ORG':
                suffix = 'ORG'
            elif suffix not in ('PER', 'LOC', 'ORG'):
                suffix = 'MISC'
            tag = prefix + '-' + suffix
        new_iob.append(tag)
    return new_iob
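
# For example (an invented tag sequence), simplify_tags() maps:
#
#     ['B-GPE_LOC', 'I-GPE_LOC', 'O', 'B-PROD']
#     -> ['B-LOC', 'I-LOC', 'O', 'B-MISC']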


def generate_sentence(sent, has_ner_tags):
    (id_, word, tag, head, dep, iob) = sent
    sentence = {}
    tokens = []
    if has_ner_tags:
        iob = simplify_tags(iob)
        biluo = iob_to_biluo(iob)
    for i, id_i in enumerate(id_):
        token = {}
        token["id"] = id_i
        token["orth"] = word[i]
        token["tag"] = tag[i]
        # Heads are stored as offsets relative to the token itself
        token["head"] = head[i] - id_i
        token["dep"] = dep[i]
        if has_ner_tags:
            token["ner"] = biluo[i]
        tokens.append(token)
    sentence["tokens"] = tokens
    return sentence
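
# A single token emitted by generate_sentence() might look like this
# (values invented for illustration); note that "head" is a relative
# offset, not an absolute index:
#
#     {"id": 3, "orth": "casa", "tag": "NOUN", "head": -1,
#      "dep": "obj", "ner": "O"}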


def create_doc(sentences, id_):
    doc = {}
    paragraph = {}
    doc["id"] = id_
    doc["paragraphs"] = []
    paragraph["sentences"] = sentences
    doc["paragraphs"].append(paragraph)
    return doc
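
# For instance (sketch), create_doc([sentence], 0) returns:
#
#     {"id": 0, "paragraphs": [{"sentences": [sentence]}]}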