diff --git a/spacy/tests/lang/tr/test_tokenizer.py b/spacy/tests/lang/tr/test_tokenizer.py
index 2ceca5068..9f988eae9 100644
--- a/spacy/tests/lang/tr/test_tokenizer.py
+++ b/spacy/tests/lang/tr/test_tokenizer.py
@@ -694,5 +694,4 @@ TESTS = ABBREV_TESTS + URL_TESTS + NUMBER_TESTS + PUNCT_TESTS + GENERAL_TESTS
 def test_tr_tokenizer_handles_allcases(tr_tokenizer, text, expected_tokens):
     tokens = tr_tokenizer(text)
     token_list = [token.text for token in tokens if not token.is_space]
-    print(token_list)
     assert expected_tokens == token_list
diff --git a/spacy/tests/pipeline/test_morphologizer.py b/spacy/tests/pipeline/test_morphologizer.py
index 11d6f0477..33696bfd8 100644
--- a/spacy/tests/pipeline/test_morphologizer.py
+++ b/spacy/tests/pipeline/test_morphologizer.py
@@ -184,7 +184,7 @@ def test_overfitting_IO():
         token.pos_ = ""
         token.set_morph(None)
     optimizer = nlp.initialize(get_examples=lambda: train_examples)
-    print(nlp.get_pipe("morphologizer").labels)
+    assert nlp.get_pipe("morphologizer").labels is not None
     for i in range(50):
         losses = {}
         nlp.update(train_examples, sgd=optimizer, losses=losses)
diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py
index 5e431d5cb..26a5710a8 100644
--- a/spacy/tests/test_cli.py
+++ b/spacy/tests/test_cli.py
@@ -217,7 +217,6 @@ def test_cli_converters_conllu_to_docs_subtokens():
     sent = converted[0]["paragraphs"][0]["sentences"][0]
     assert len(sent["tokens"]) == 4
     tokens = sent["tokens"]
-    print(tokens)
     assert [t["orth"] for t in tokens] == ["Dommer", "FE", "avstår", "."]
     assert [t["tag"] for t in tokens] == [
         "NOUN__Definite=Ind|Gender=Masc|Number=Sing",