import spacy

from spacy.util import minibatch, compounding
def test_issue3611():
    """Test whether adding n-grams in the textcat works even when n > token length of some docs.

    Regression test: trains a bag-of-words text categorizer with
    ``ngram_size=2`` on a tiny corpus that includes a one-token document
    ("inoff"), so bigram extraction must cope with docs shorter than n.
    Passes if training completes without raising.
    """
    unique_classes = ["offensive", "inoffensive"]
    x_train = [
        "This is an offensive text",
        "This is the second offensive text",
        "inoff",  # deliberately shorter than ngram_size=2
    ]
    y_train = ["offensive", "offensive", "inoffensive"]

    # preparing the data: one {label: bool} cats dict per training example
    pos_cats = [
        {label: label == train_instance for label in unique_classes}
        for train_instance in y_train
    ]
    train_data = list(zip(x_train, [{"cats": cats} for cats in pos_cats]))

    # set up the spacy model with a text categorizer component
    nlp = spacy.blank("en")

    textcat = nlp.create_pipe(
        "textcat",
        # "bow" architecture with ngram_size=2 is the configuration that
        # triggered the original bug for short documents
        config={"exclusive_classes": True, "architecture": "bow", "ngram_size": 2},
    )

    for label in unique_classes:
        textcat.add_label(label)
    nlp.add_pipe(textcat, last=True)

    # training the network: only the textcat component is trained
    with nlp.select_pipes(enable="textcat"):
        optimizer = nlp.begin_training(X=x_train, Y=y_train)
        for _ in range(3):
            losses = {}
            batches = minibatch(train_data, size=compounding(4.0, 32.0, 1.001))

            for batch in batches:
                nlp.update(
                    examples=batch, sgd=optimizer, drop=0.1, losses=losses,
                )