spaCy/examples/training/train_textcat.py

#!/usr/bin/env python
# coding: utf8
"""Train a convolutional neural network text classifier on the
IMDB dataset, using the TextCategorizer component. The dataset will be loaded
automatically via Thinc's built-in dataset loader. The text categorizer is
added to the pipeline, and predictions are available via `doc.cats`. For more details,
see the documentation:
* Training: https://spacy.io/usage/training

Compatible with: spaCy v2.0.0+
"""
from __future__ import unicode_literals, print_function
import plac
import random
from pathlib import Path
import thinc.extra.datasets

import spacy
from spacy.util import minibatch, compounding


@plac.annotations(
model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
output_dir=("Optional output directory", "option", "o", Path),
n_texts=("Number of texts to train from", "option", "t", int),
n_iter=("Number of training iterations", "option", "n", int),
)
def main(model=None, output_dir=None, n_iter=20, n_texts=2000):
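    """Train the textcat component on the IMDB data and optionally save the model."""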
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
            output_dir.mkdir()

    if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
        print("Created blank 'en' model")

    # add the text classifier to the pipeline if it doesn't exist
# nlp.create_pipe works for built-ins that are registered with spaCy
if "textcat" not in nlp.pipe_names:
textcat = nlp.create_pipe(
"textcat", config={"architecture": "simple_cnn", "exclusive_classes": True}
)
nlp.add_pipe(textcat, last=True)
# otherwise, get it, so we can add labels to it
else:
textcat = nlp.get_pipe("textcat")
    # add labels to the text classifier
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")
2017-10-26 22:32:19 +00:00
2017-12-09 12:14:57 +00:00
# load the IMDB dataset
print("Loading IMDB data...")
(train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts)
print(
"Using {} examples ({} training, {} evaluation)".format(
n_texts, len(train_texts), len(dev_texts)
)
)
    train_data = list(zip(train_texts, [{"cats": cats} for cats in train_cats]))

    # get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "textcat"]
with nlp.disable_pipes(*other_pipes): # only train textcat
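        # begin_training() initialises the model weights and returns an
        # optimizer that is reused for every update below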
optimizer = nlp.begin_training()
print("Training the model...")
print("{:^5}\t{:^5}\t{:^5}\t{:^5}".format("LOSS", "P", "R", "F"))
for i in range(n_iter):
losses = {}
# batch up the examples using spaCy's minibatch
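            # compounding(4.0, 32.0, 1.001) yields batch sizes that start at 4
            # and grow by a factor of 1.001 per batch, capped at 32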
batches = minibatch(train_data, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
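                # each update applies dropout of 0.2 and accumulates the
                # component's loss under losses["textcat"]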
nlp.update(texts, annotations, sgd=optimizer, drop=0.2, losses=losses)
with textcat.model.use_params(optimizer.averages):
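                # evaluate with the moving-average weights; outside this block
                # training continues from the unaveraged parameters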
# evaluate on the dev data split off in load_data()
scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
print(
"{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format( # print a simple table
losses["textcat"],
scores["textcat_p"],
scores["textcat_r"],
scores["textcat_f"],
)
            )

    # test the trained model
test_text = "This movie sucked"
doc = nlp(test_text)
    print(test_text, doc.cats)

    if output_dir is not None:
with nlp.use_params(optimizer.averages):
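            # write the model out with the averaged weights, which tend to
            # generalise a little better than the final raw weights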
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
doc2 = nlp2(test_text)
        print(test_text, doc2.cats)


def load_data(limit=0, split=0.8):
"""Load data from the IMDB dataset."""
# Partition off part of the train data for evaluation
train_data, _ = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
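    # each example gets a dict of boolean scores per label, which is the
    # format the textcat component expects under the "cats" key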
cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
split = int(len(train_data) * split)
    return (texts[:split], cats[:split]), (texts[split:], cats[split:])


def evaluate(tokenizer, textcat, texts, cats):
docs = (tokenizer(text) for text in texts)
tp = 0.0 # True positives
fp = 1e-8 # False positives
fn = 1e-8 # False negatives
tn = 0.0 # True negatives
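    # fp/fn start at a small epsilon to avoid division by zero below; scores
    # are computed for the POSITIVE label only, with 0.5 as the decision
    # threshold, since the two labels are mutually exclusive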
for i, doc in enumerate(textcat.pipe(docs)):
gold = cats[i]
for label, score in doc.cats.items():
if label not in gold:
continue
if label == "NEGATIVE":
continue
if score >= 0.5 and gold[label] >= 0.5:
tp += 1.0
elif score >= 0.5 and gold[label] < 0.5:
fp += 1.0
            elif score < 0.5 and gold[label] < 0.5:
                tn += 1.0
            elif score < 0.5 and gold[label] >= 0.5:
                fn += 1.0
precision = tp / (tp + fp)
recall = tp / (tp + fn)
if (precision + recall) == 0:
f_score = 0.0
else:
f_score = 2 * (precision * recall) / (precision + recall)
return {"textcat_p": precision, "textcat_r": recall, "textcat_f": f_score}
2017-07-22 22:34:12 +00:00
2018-12-02 03:26:26 +00:00
if __name__ == "__main__":
plac.call(main)