diff --git a/spacy/_ml.py b/spacy/_ml.py
index 3fc2c4718..711e27fca 100644
--- a/spacy/_ml.py
+++ b/spacy/_ml.py
@@ -646,7 +646,7 @@ def build_text_classifier(nr_class, width=64, **cfg):
                 SpacyVectors
                 >> flatten_add_lengths
                 >> with_getitem(0, Affine(width, pretrained_dims))
-                >> ParametricAttention(width)
+                >> ParametricAttention(width, seed=100)
                 >> Pooling(sum_pool)
                 >> Residual(ReLu(width, width)) ** 2
                 >> zero_init(Affine(nr_class, width, drop_factor=0.0))
@@ -688,7 +688,7 @@ def build_text_classifier(nr_class, width=64, **cfg):
     cnn_model = (
         tok2vec
         >> flatten_add_lengths
-        >> ParametricAttention(width)
+        >> ParametricAttention(width, seed=99)
         >> Pooling(sum_pool)
         >> Residual(zero_init(Maxout(width, width)))
         >> zero_init(Affine(nr_class, width, drop_factor=0.0))
diff --git a/spacy/tests/regression/test_issue6177.py b/spacy/tests/regression/test_issue6177.py
index c806011c3..cb33bc692 100644
--- a/spacy/tests/regression/test_issue6177.py
+++ b/spacy/tests/regression/test_issue6177.py
@@ -11,7 +11,6 @@ def test_issue6177():
     # NOTE: no need to transform this code to v3 when 'master' is merged into 'develop'.
     # A similar test exists already for v3: test_issue5551
     # This is just a backport
-
     results = []
     for i in range(3):
         fix_random_seed(0)
@@ -24,12 +23,15 @@ def test_issue6177():
         nlp.add_pipe(textcat)
         for label in set(example[1]["cats"]):
             textcat.add_label(label)
-        nlp.begin_training()
+        # Train
+        optimizer = nlp.begin_training()
+        text, annots = example
+        nlp.update([text], [annots], sgd=optimizer)
         # Store the result of each iteration
-        result = textcat.model.predict([nlp.make_doc(example[0])])
+        result = textcat.model.predict([nlp.make_doc(text)])
         results.append(list(result[0]))

     # All results should be the same because of the fixed seed
     assert len(results) == 3
     assert results[0] == results[1]
-    assert results[0] == results[2]
\ No newline at end of file
+    assert results[0] == results[2]