From 8fdb9bc278b950436d6a6d28cb27ef093fca9560 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Thu, 15 Nov 2018 22:17:16 +0100
Subject: [PATCH] =?UTF-8?q?=F0=9F=92=AB=20Add=20experimental=20ULMFit/BERT?=
 =?UTF-8?q?/Elmo-like=20pretraining=20=20(#2931)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add 'spacy pretrain' command

* Fix pretrain command for Python 2

* Fix pretrain command

* Fix pretrain command
---
 spacy/__main__.py     |   3 +-
 spacy/cli/__init__.py |   1 +
 spacy/cli/pretrain.py | 188 ++++++++++++++++++++++++++++++++++++++++++
 spacy/cli/train.py    |  21 ++++-
 4 files changed, 211 insertions(+), 2 deletions(-)
 create mode 100644 spacy/cli/pretrain.py

diff --git a/spacy/__main__.py b/spacy/__main__.py
index 897d890c2..5d712ea15 100644
--- a/spacy/__main__.py
+++ b/spacy/__main__.py
@@ -6,7 +6,7 @@ from __future__ import print_function
 if __name__ == '__main__':
     import plac
     import sys
-    from spacy.cli import download, link, info, package, train, convert
+    from spacy.cli import download, link, info, package, train, pretrain, convert
     from spacy.cli import vocab, init_model, profile, evaluate, validate
     from spacy.cli import ud_train, ud_evaluate
     from spacy.util import prints
@@ -16,6 +16,7 @@ if __name__ == '__main__':
         'link': link,
         'info': info,
         'train': train,
+        'pretrain': pretrain,
         'ud-train': ud_train,
         'evaluate': evaluate,
         'ud-evaluate': ud_evaluate,
diff --git a/spacy/cli/__init__.py b/spacy/cli/__init__.py
index 2788ffc86..5497c55ce 100644
--- a/spacy/cli/__init__.py
+++ b/spacy/cli/__init__.py
@@ -4,6 +4,7 @@ from .link import link
 from .package import package
 from .profile import profile
 from .train import train
+from .pretrain import pretrain
 from .evaluate import evaluate
 from .convert import convert
 from .vocab import make_vocab as vocab
diff --git a/spacy/cli/pretrain.py b/spacy/cli/pretrain.py
new file mode 100644
index 000000000..e356c1183
--- /dev/null
+++ b/spacy/cli/pretrain.py
@@ -0,0 +1,188 @@
+'''This script is experimental.
+
+Try pre-training the CNN component of the text categorizer using a cheap
+language modelling-like objective. Specifically, we load pre-trained vectors
+(from something like word2vec, GloVe, FastText etc), and use the CNN to
+predict the tokens' pre-trained vectors. This isn't as easy as it sounds:
+we're not merely doing compression here, because heavy dropout is applied,
+including over the input words. This means the model must often (50% of the
+time) use the context in order to predict the word.
+
+To evaluate the technique, we're pre-training with the 50k texts from the IMDB
+corpus, and then training with only 100 labels. Note that it's a bit dirty to
+pre-train with the development data, but also not *so* terrible: we're not
+using the development labels, after all --- only the unlabelled text.
+'''
+from __future__ import print_function, unicode_literals
+import plac
+import random
+import numpy
+import time
+import ujson as json
+from pathlib import Path
+
+import spacy
+from spacy.attrs import ID
+from spacy.util import minibatch, use_gpu, compounding, ensure_path
+from spacy._ml import Tok2Vec, flatten, chain, zero_init, create_default_optimizer
+from thinc.v2v import Affine
+
+
+def prefer_gpu():
+    used = spacy.util.use_gpu(0)
+    if used is None:
+        return False
+    else:
+        import cupy.random
+        cupy.random.seed(0)
+        return True
+
+
+def load_texts(path):
+    '''Load inputs from a jsonl file.
+
+    Each line should be a dict like {"text": "..."}
+    '''
+    path = ensure_path(path)
+    with path.open('r', encoding='utf8') as file_:
+        for line in file_:
+            data = json.loads(line)
+            yield data['text']
+
+
+def make_update(model, docs, optimizer, drop=0.):
+    """Perform an update over a single batch of documents.
+
+    docs (iterable): A batch of `Doc` objects.
+    drop (float): The dropout rate.
+    optimizer (callable): An optimizer.
+    RETURNS loss: A float for the loss.
+    """
+    predictions, backprop = model.begin_update(docs, drop=drop)
+    loss, gradients = get_vectors_loss(model.ops, docs, predictions)
+    backprop(gradients, sgd=optimizer)
+    return loss
+
+
+def get_vectors_loss(ops, docs, prediction):
+    """Compute a mean-squared error loss between the documents' vectors and
+    the prediction.
+
+    Note that this is ripe for customization! We could compute the vectors
+    in some other way, e.g. with an LSTM language model, or use some other
+    type of objective.
+    """
+    # The simplest way to implement this would be to vstack the
+    # token.vector values, but that's a bit inefficient, especially on GPU.
+    # Instead we fetch the index into the vectors table for each of our tokens,
+    # and look them up all at once. This prevents data copying.
+    ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
+    target = docs[0].vocab.vectors.data[ids]
+    d_scores = (prediction - target) / prediction.shape[0]
+    loss = (d_scores**2).sum()
+    return loss, d_scores
+
+
+def create_pretraining_model(nlp, tok2vec):
+    '''Define a network for the pretraining. We simply add an output layer onto
+    the tok2vec input model. The tok2vec input model needs to be a model that
+    takes a batch of Doc objects (as a list), and returns a list of arrays.
+    Each array in the output needs to have one row per token in the doc.
+    '''
+    output_size = nlp.vocab.vectors.data.shape[1]
+    output_layer = zero_init(Affine(output_size, drop_factor=0.0))
+    model = chain(
+        tok2vec,
+        flatten,
+        output_layer
+    )
+    model.output_layer = output_layer
+    model.begin_training([nlp.make_doc('Give it a doc to infer shapes')])
+    return model
+
+
+class ProgressTracker(object):
+    def __init__(self, frequency=10000):
+        self.loss = 0.
+        self.nr_word = 0
+        self.frequency = frequency
+        self.last_time = time.time()
+        self.last_update = 0
+
+    def update(self, epoch, loss, docs):
+        self.loss += loss
+        self.nr_word += sum(len(doc) for doc in docs)
+        words_since_update = self.nr_word - self.last_update
+        if words_since_update >= self.frequency:
+            wps = words_since_update / (time.time() - self.last_time)
+            self.last_update = self.nr_word
+            self.last_time = time.time()
+            status = (epoch, self.nr_word, '%.5f' % self.loss, int(wps))
+            return status
+        else:
+            return None
+
+
+@plac.annotations(
+    texts_loc=("Path to jsonl file with texts to learn from", "positional", None, str),
+    vectors_model=("Name or path to vectors model to learn from"),
+    output_dir=("Directory to write models each epoch", "positional", None, str),
+    width=("Width of CNN layers", "option", "cw", int),
+    depth=("Depth of CNN layers", "option", "cd", int),
+    embed_rows=("Embedding rows", "option", "er", int),
+    dropout=("Dropout", "option", "d", float),
+    seed=("Seed for random number generators", "option", "s", float),
+    nr_iter=("Number of iterations to pretrain", "option", "i", int),
+)
+def pretrain(texts_loc, vectors_model, output_dir, width=128, depth=4,
+             embed_rows=1000, dropout=0.2, nr_iter=1, seed=0):
+    """
+    Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components,
+    using an approximate language-modelling objective. Specifically, we load
+    pre-trained vectors, and train a component like a CNN, BiLSTM, etc to predict
+    vectors which match the pre-trained ones. The weights are saved to a directory
+    after each epoch. You can then pass a path to one of these pre-trained weights
+    files to the 'spacy train' command.
+
+    This technique may be especially helpful if you have little labelled data.
+    However, it's still quite experimental, so your mileage may vary.
+
+    To load the weights back in during 'spacy train', you need to ensure
+    all settings are the same between pretraining and training. The API and
+    errors around this need some improvement.
+    """
+    config = dict(locals())
+    output_dir = ensure_path(output_dir)
+    random.seed(seed)
+    numpy.random.seed(seed)
+    if not output_dir.exists():
+        output_dir.mkdir()
+    with (output_dir / 'config.json').open('w') as file_:
+        file_.write(json.dumps(config))
+    has_gpu = prefer_gpu()
+    nlp = spacy.load(vectors_model)
+    tok2vec = Tok2Vec(width, embed_rows,
+                      conv_depth=depth,
+                      pretrained_vectors=nlp.vocab.vectors.name,
+                      bilstm_depth=0,  # Requires PyTorch. Experimental.
+                      cnn_maxout_pieces=2,  # You can try setting this higher
+                      subword_features=True)  # Set to False for character models, e.g. Chinese
+    model = create_pretraining_model(nlp, tok2vec)
+    optimizer = create_default_optimizer(model.ops)
+    tracker = ProgressTracker()
+    texts = list(load_texts(texts_loc))
+    print('Epoch', '#Words', 'Loss', 'w/s')
+    for epoch in range(nr_iter):
+        random.shuffle(texts)
+        for batch in minibatch(texts):
+            docs = [nlp.make_doc(text) for text in batch]
+            loss = make_update(model, docs, optimizer, drop=dropout)
+            progress = tracker.update(epoch, loss, docs)
+            if progress:
+                print(*progress)
+        with model.use_params(optimizer.averages):
+            with (output_dir / ('model%d.bin' % epoch)).open('wb') as file_:
+                file_.write(tok2vec.to_bytes())
+            with (output_dir / 'log.jsonl').open('a') as file_:
+                file_.write(json.dumps({'nr_word': tracker.nr_word,
+                                        'loss': tracker.loss, 'epoch': epoch}))
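
Illustrative sketch (not from the patch): the objective implemented by
get_vectors_loss() above reduces to a mean-squared-error-style comparison
between the flattened tok2vec+Affine output and the rows of the pre-trained
vectors table selected by token ID. A minimal numpy sketch, with made-up
shapes and values standing in for the real model output and vectors table:

    import numpy

    # Toy stand-ins (assumed sizes): a 10-row vectors table of width 3, 4 tokens.
    vectors_table = numpy.random.rand(10, 3)    # like nlp.vocab.vectors.data
    ids = numpy.array([2, 7, 7, 1])             # like doc.to_array(ID).ravel()
    target = vectors_table[ids]                 # rows the model should predict
    prediction = numpy.random.rand(4, 3)        # stand-in for the tok2vec+Affine output

    d_scores = (prediction - target) / prediction.shape[0]  # gradient fed to backprop()
    loss = (d_scores ** 2).sum()                # the value ProgressTracker accumulates
    print(loss, d_scores.shape)
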
diff --git a/spacy/cli/train.py b/spacy/cli/train.py
index a0fb4d28a..ccd404db3 100644
--- a/spacy/cli/train.py
+++ b/spacy/cli/train.py
@@ -40,9 +40,11 @@ from ..compat import json_dumps
     version=("Model version", "option", "V", str),
     meta_path=("Optional path to meta.json. All relevant properties will be "
                "overwritten.", "option", "m", Path),
+    init_tok2vec=("Path to pretrained weights for the token-to-vector parts "
+                  "of the models. See 'spacy pretrain'. Experimental.", "option", "t2v", Path),
     verbose=("Display more information for debug", "option", None, bool))
 def train(lang, output_dir, train_data, dev_data, n_iter=30, n_sents=0,
-          parser_multitasks='', entity_multitasks='',
+          parser_multitasks='', entity_multitasks='', init_tok2vec=None,
           use_gpu=-1, vectors=None, no_tagger=False, noise_level=0.0,
           no_parser=False, no_entities=False, gold_preproc=False,
           version="0.0.0", meta_path=None, verbose=False):
@@ -120,6 +122,9 @@ def train(lang, output_dir, train_data, dev_data, n_iter=30, n_sents=0,
         for objective in entity_multitasks.split(','):
             nlp.entity.add_multitask_objective(objective)
     optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu)
+    if init_tok2vec is not None:
+        loaded = _load_pretrained_tok2vec(nlp, init_tok2vec)
+        print("Loaded pretrained tok2vec for:", loaded)
     nlp._optimizer = None
 
     print("Itn.  Dep Loss  NER Loss  UAS  NER P.  NER R.  NER F.  Tag %  Token %  CPU WPS  GPU WPS")
@@ -199,6 +204,20 @@ def train(lang, output_dir, train_data, dev_data, n_iter=30, n_sents=0,
         _collate_best_model(meta, output_path, components)
 
 
+def _load_pretrained_tok2vec(nlp, loc):
+    """Load pre-trained weights for the 'token-to-vector' part of the component
+    models, which is typically a CNN. See 'spacy pretrain'. Experimental.
+    """
+    with loc.open('rb') as file_:
+        weights_data = file_.read()
+    loaded = []
+    for name, component in nlp.pipeline:
+        if hasattr(component, 'model') and hasattr(component.model, 'tok2vec'):
+            component.model.tok2vec.from_bytes(weights_data)
+            loaded.append(name)
+    return loaded
+
+
 def _collate_best_model(meta, output_path, components):
     bests = {}
     for component in components:
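
Illustrative sketch (not from the patch): the intended workflow is to run
'spacy pretrain' over raw text first, which writes one weights file per epoch
into the output directory, and then point 'spacy train' at one of those files
via the new init_tok2vec ("-t2v") option. As the pretrain docstring warns, the
tok2vec settings must match between the two runs. Roughly, calling the two
commands as Python functions, with placeholder paths and an assumed installed
vectors package (en_core_web_md):

    from pathlib import Path
    from spacy.cli import pretrain, train

    # 1) Learn tok2vec weights from raw text; texts.jsonl holds one
    #    {"text": "..."} object per line, as load_texts() expects.
    pretrain('texts.jsonl', 'en_core_web_md', 'pretrain_output', nr_iter=2)

    # 2) Train a pipeline, initialising its tok2vec layers from the weights
    #    saved after the last epoch (model1.bin when nr_iter=2).
    train('en', 'model_output', 'train.json', 'dev.json',
          vectors='en_core_web_md',
          init_tok2vec=Path('pretrain_output/model1.bin'))
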