commit c6ca8649d7
parent 9f33732b96
Author: svlandeg
Date:   2019-05-09 17:23:19 +02:00

    first stab at model - not functional yet

2 changed files with 158 additions and 41 deletions

examples/pipeline/wiki_entity_linking/train_el.py

@@ -6,53 +6,168 @@ import datetime
 from os import listdir

 from examples.pipeline.wiki_entity_linking import run_el, training_set_creator, kb_creator
+from examples.pipeline.wiki_entity_linking import wikidata_processor as wd
+
+from spacy._ml import SpacyVectors, create_default_optimizer, zero_init
+
+from thinc.api import chain
+from thinc.v2v import Model, Maxout, Softmax, Affine, ReLu
+from thinc.api import flatten_add_lengths
+from thinc.t2v import Pooling, sum_pool, mean_pool
+from thinc.t2t import ExtractWindow, ParametricAttention
+from thinc.misc import Residual

 """ TODO: this code needs to be implemented in pipes.pyx"""

-def train_model(kb, nlp, training_dir, entity_descr_output, limit=None):
-    run_el._prepare_pipeline(nlp, kb)
-
-    correct_entries, incorrect_entries = training_set_creator.read_training_entities(training_output=training_dir,
-                                                                                     collect_correct=True,
-                                                                                     collect_incorrect=True)
-    entities = kb.get_entity_strings()
-    id_to_descr = kb_creator._get_id_to_description(entity_descr_output)
-
-    cnt = 0
-    for f in listdir(training_dir):
-        if not limit or cnt < limit:
-            if not run_el.is_dev(f):
-                article_id = f.replace(".txt", "")
-                if cnt % 500 == 0:
-                    print(datetime.datetime.now(), "processed", cnt, "files in the dev dataset")
-                cnt += 1
-
-                with open(os.path.join(training_dir, f), mode="r", encoding='utf8') as file:
-                    text = file.read()
-                    doc = nlp(text)
-                    doc_vector = doc.vector
-                    print("FILE", f, len(doc_vector), "D vector")
-
-                for mention_pos, entity_pos in correct_entries[article_id].items():
-                    descr = id_to_descr.get(entity_pos)
-                    if descr:
-                        doc_descr = nlp(descr)
-                        descr_vector = doc_descr.vector
-                        print("GOLD POS", mention_pos, entity_pos, len(descr_vector), "D vector")
-
-                for mention_neg, entity_negs in incorrect_entries[article_id].items():
-                    for entity_neg in entity_negs:
-                        descr = id_to_descr.get(entity_neg)
-                        if descr:
-                            doc_descr = nlp(descr)
-                            descr_vector = doc_descr.vector
-                            print("GOLD NEG", mention_neg, entity_neg, len(descr_vector), "D vector")
-
-    print()
-    print("Processed", cnt, "dev articles")
-    print()
+class EL_Model():
+
+    labels = ["MATCH", "NOMATCH"]
+    name = "entity_linker"
+
+    def __init__(self, kb, nlp):
+        run_el._prepare_pipeline(nlp, kb)
+        self.nlp = nlp
+        self.kb = kb
+
+        self.entity_encoder = self._simple_encoder(width=300)
+        self.article_encoder = self._simple_encoder(width=300)
+
+    def train_model(self, training_dir, entity_descr_output, limit=None, to_print=True):
+        instances, gold_vectors, entity_descriptions, doc_by_article = self._get_training_data(training_dir,
+                                                                                               entity_descr_output,
+                                                                                               limit, to_print)
+
+        if to_print:
+            print("Training on", len(gold_vectors), "instances")
+            print(" - pos:", len([x for x in gold_vectors if x]), "instances")
+            print(" - neg:", len([x for x in gold_vectors if not x]), "instances")
+            print()
+
+        self.sgd_entity = self.begin_training(self.entity_encoder)
+        self.sgd_article = self.begin_training(self.article_encoder)
+
+        losses = {}
+
+        for inst, label, entity_descr in zip(instances, gold_vectors, entity_descriptions):
+            article = inst.split(sep="_")[0]
+            entity_id = inst.split(sep="_")[1]
+            article_doc = doc_by_article[article]
+            self.update(article_doc, entity_descr, label, losses=losses)
+
+    def _simple_encoder(self, width):
+        with Model.define_operators({">>": chain}):
+            encoder = SpacyVectors \
+                      >> flatten_add_lengths \
+                      >> ParametricAttention(width) \
+                      >> Pooling(sum_pool) \
+                      >> Residual(zero_init(Maxout(width, width)))
+
+        return encoder
+
+    def begin_training(self, model):
+        # TODO ? link_vectors_to_models(self.vocab)
+        sgd = create_default_optimizer(model.ops)
+        return sgd
+
+    def update(self, article_doc, entity_descr, label, drop=0., losses=None):
+        entity_encoding, entity_bp = self.entity_encoder.begin_update([entity_descr], drop=drop)
+        doc_encoding, article_bp = self.article_encoder.begin_update([article_doc], drop=drop)
+
+        # print("entity/article output dim", len(entity_encoding[0]), len(doc_encoding[0]))
+
+        mse, diffs = self._calculate_similarity(entity_encoding, doc_encoding)
+
+        # TODO: proper backpropagation taking ranking of elements into account ?
+        # TODO: backpropagation also for negative examples
+        if label:
+            entity_bp(diffs, sgd=self.sgd_entity)
+            article_bp(diffs, sgd=self.sgd_article)
+            print(mse)
+
+    # TODO: delete ?
+    def _simple_cnn_model(self, internal_dim):
+        nr_class = len(self.labels)
+        with Model.define_operators({">>": chain}):
+            model_entity = SpacyVectors >> flatten_add_lengths >> Pooling(mean_pool)  # entity encoding
+            model_doc = SpacyVectors >> flatten_add_lengths >> Pooling(mean_pool)  # doc encoding
+            output_layer = Softmax(nr_class, internal_dim * 2)
+            model = (model_entity | model_doc) >> output_layer
+            # model.tok2vec = chain(tok2vec, flatten)
+        model.nO = nr_class
+        return model
+
+    def predict(self, entity_doc, article_doc):
+        entity_encoding = self.entity_encoder(entity_doc)
+        doc_encoding = self.article_encoder(article_doc)
+
+        print("entity_encodings", len(entity_encoding), entity_encoding)
+        print("doc_encodings", len(doc_encoding), doc_encoding)
+        mse, diffs = self._calculate_similarity(entity_encoding, doc_encoding)
+        print("mse", mse)
+
+        return mse
+
+    def _calculate_similarity(self, vector1, vector2):
+        if len(vector1) != len(vector2):
+            raise ValueError("To calculate similarity, both vectors should be of equal length")
+
+        diffs = (vector2 - vector1)
+        error_sum = (diffs ** 2).sum(axis=1)
+        mean_square_error = error_sum / len(vector1)
+        return float(mean_square_error), diffs
+
+    def _get_labels(self):
+        return tuple(self.labels)
+
+    def _get_training_data(self, training_dir, entity_descr_output, limit, to_print):
+        id_to_descr = kb_creator._get_id_to_description(entity_descr_output)
+
+        correct_entries, incorrect_entries = training_set_creator.read_training_entities(training_output=training_dir,
+                                                                                         collect_correct=True,
+                                                                                         collect_incorrect=True)
+
+        instances = list()
+        entity_descriptions = list()
+        local_vectors = list()  # TODO: local vectors
+        gold_vectors = list()
+        doc_by_article = dict()
+
+        cnt = 0
+        for f in listdir(training_dir):
+            if not limit or cnt < limit:
+                if not run_el.is_dev(f):
+                    article_id = f.replace(".txt", "")
+                    if cnt % 500 == 0 and to_print:
+                        print(datetime.datetime.now(), "processed", cnt, "files in the dev dataset")
+                    cnt += 1
+                    if article_id not in doc_by_article:
+                        with open(os.path.join(training_dir, f), mode="r", encoding='utf8') as file:
+                            text = file.read()
+                            doc = self.nlp(text)
+                            doc_by_article[article_id] = doc
+
+                    for mention_pos, entity_pos in correct_entries[article_id].items():
+                        descr = id_to_descr.get(entity_pos)
+                        if descr:
+                            instances.append(article_id + "_" + entity_pos)
+                            doc = self.nlp(descr)
+                            entity_descriptions.append(doc)
+                            gold_vectors.append(True)
+
+                    for mention_neg, entity_negs in incorrect_entries[article_id].items():
+                        for entity_neg in entity_negs:
+                            descr = id_to_descr.get(entity_neg)
+                            if descr:
+                                instances.append(article_id + "_" + entity_neg)
+                                doc = self.nlp(descr)
+                                entity_descriptions.append(doc)
+                                gold_vectors.append(False)
+
+        if to_print:
+            print()
+            print("Processed", cnt, "dev articles")
+            print()
+
+        return instances, gold_vectors, entity_descriptions, doc_by_article
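
The update() step above uses the mean squared difference between the article encoding and the entity-description encoding as its training signal, and pushes the raw difference vector back through both encoders; the TODO comments note this is not yet a proper loss. For reference, a minimal standalone NumPy sketch of that objective and its analytic gradients (mse_loss_and_grads is a hypothetical helper, not part of this commit; note the gradient with respect to the entity encoding is the negated difference, a sign update() does not yet distinguish):

import numpy as np

def mse_loss_and_grads(entity_enc, doc_enc):
    # entity_enc, doc_enc: float arrays of shape (batch, width),
    # as produced by the two encoders for a batch of docs
    diffs = doc_enc - entity_enc                   # same "diffs" as in update()
    loss = float((diffs ** 2).sum(axis=1).mean())  # mean squared error over the batch
    d_doc = 2 * diffs / diffs.shape[0]             # d(loss)/d(doc_enc)
    d_entity = -d_doc                              # d(loss)/d(entity_enc): opposite sign
    return loss, d_doc, d_entity

entity_enc = np.random.rand(1, 300).astype("float32")
doc_enc = np.random.rand(1, 300).astype("float32")
loss, d_doc, d_entity = mse_loss_and_grads(entity_enc, doc_enc)
print(loss, d_doc.shape, d_entity.shape)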

@@ -1,7 +1,8 @@
 # coding: utf-8
 from __future__ import unicode_literals

-from examples.pipeline.wiki_entity_linking import wikipedia_processor as wp, kb_creator, training_set_creator, run_el, train_el
+from examples.pipeline.wiki_entity_linking import wikipedia_processor as wp, kb_creator, training_set_creator, run_el
+from examples.pipeline.wiki_entity_linking.train_el import EL_Model

 import spacy
 from spacy.vocab import Vocab

@@ -31,17 +32,17 @@ if __name__ == "__main__":
     # one-time methods to create KB and write to file
     to_create_prior_probs = False
     to_create_entity_counts = False
-    to_create_kb = True
+    to_create_kb = False

     # read KB back in from file
     to_read_kb = True
-    to_test_kb = True
+    to_test_kb = False

     # create training dataset
     create_wp_training = False

     # run training
-    run_training = False
+    run_training = True

     # apply named entity linking to the dev dataset
     apply_to_dev = False

@@ -105,16 +106,17 @@ if __name__ == "__main__":
         print("STEP 5: create training dataset", datetime.datetime.now())
         training_set_creator.create_training(kb=my_kb, entity_def_input=ENTITY_DEFS, training_output=TRAINING_DIR)

-    # STEP 7: apply the EL algorithm on the training dataset
+    # STEP 6: apply the EL algorithm on the training dataset
     if run_training:
         print("STEP 6: training ", datetime.datetime.now())
-        my_nlp = spacy.load('en_core_web_sm')
-        train_el.train_model(kb=my_kb, nlp=my_nlp, training_dir=TRAINING_DIR, entity_descr_output=ENTITY_DESCR, limit=5)
+        my_nlp = spacy.load('en_core_web_md')
+        trainer = EL_Model(kb=my_kb, nlp=my_nlp)
+        trainer.train_model(training_dir=TRAINING_DIR, entity_descr_output=ENTITY_DESCR, limit=50)
         print()

-    # STEP 8: apply the EL algorithm on the dev dataset
+    # STEP 7: apply the EL algorithm on the dev dataset
     if apply_to_dev:
-        my_nlp = spacy.load('en_core_web_sm')
+        my_nlp = spacy.load('en_core_web_md')
         run_el.run_el_dev(kb=my_kb, nlp=my_nlp, training_dir=TRAINING_DIR, limit=2000)
         print()
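
The driver script in this second file switches to en_core_web_md because the _simple_encoder stack in train_el.py pools pre-trained spaCy word vectors into a single fixed-width document vector, so real vectors are needed. A rough smoke test of that composition, assuming the pre-1.0 Thinc API this branch uses (thinc 7.x with spaCy 2.1) and that en_core_web_md is installed; this is a sketch, not part of the commit:

import spacy
from spacy._ml import SpacyVectors, zero_init
from thinc.api import chain, flatten_add_lengths
from thinc.v2v import Model, Maxout
from thinc.t2v import Pooling, sum_pool
from thinc.t2t import ParametricAttention
from thinc.misc import Residual

nlp = spacy.load("en_core_web_md")  # 300-d vectors, matching width=300 above

with Model.define_operators({">>": chain}):
    encoder = (SpacyVectors
               >> flatten_add_lengths
               >> ParametricAttention(300)
               >> Pooling(sum_pool)
               >> Residual(zero_init(Maxout(300, 300))))

docs = [nlp("Douglas Adams was an English author and humorist.")]
vectors = encoder(docs)  # one 300-d encoding per input doc
print(vectors.shape)     # expected: (1, 300)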