mirror of https://github.com/explosion/spaCy.git
removing print statements from the test suite (#10712)
This commit is contained in:
parent
455f089c9b
commit
b3717ba53a
|
@ -694,5 +694,4 @@ TESTS = ABBREV_TESTS + URL_TESTS + NUMBER_TESTS + PUNCT_TESTS + GENERAL_TESTS
|
|||
def test_tr_tokenizer_handles_allcases(tr_tokenizer, text, expected_tokens):
    """Tokenize *text* with the Turkish tokenizer fixture and check that the
    non-space token texts match *expected_tokens*.

    Parametrized (via TESTS = ABBREV_TESTS + URL_TESTS + ...) over
    (text, expected_tokens) pairs; ``tr_tokenizer`` is a project pytest
    fixture — assumed to be a spaCy Turkish tokenizer, TODO confirm.
    """
    tokens = tr_tokenizer(text)
    # Whitespace tokens are irrelevant to the expected token list.
    token_list = [token.text for token in tokens if not token.is_space]
    # Removed leftover debug print(token_list) — it spams pytest output and
    # adds nothing: on failure, the assert below already shows both lists.
    assert expected_tokens == token_list
|
||||
|
|
|
@ -184,7 +184,7 @@ def test_overfitting_IO():
|
|||
token.pos_ = ""
|
||||
token.set_morph(None)
|
||||
optimizer = nlp.initialize(get_examples=lambda: train_examples)
|
||||
print(nlp.get_pipe("morphologizer").labels)
|
||||
assert nlp.get_pipe("morphologizer").labels is not None
|
||||
for i in range(50):
|
||||
losses = {}
|
||||
nlp.update(train_examples, sgd=optimizer, losses=losses)
|
||||
|
|
|
@ -217,7 +217,6 @@ def test_cli_converters_conllu_to_docs_subtokens():
|
|||
sent = converted[0]["paragraphs"][0]["sentences"][0]
|
||||
assert len(sent["tokens"]) == 4
|
||||
tokens = sent["tokens"]
|
||||
print(tokens)
|
||||
assert [t["orth"] for t in tokens] == ["Dommer", "FE", "avstår", "."]
|
||||
assert [t["tag"] for t in tokens] == [
|
||||
"NOUN__Definite=Ind|Gender=Masc|Number=Sing",
|
||||
|
|
Loading…
Reference in New Issue