diff --git a/tests/benchmarks/test_basic_parity.py b/tests/benchmarks/test_basic_parity.py
index 7a7962d337..d893262148 100644
--- a/tests/benchmarks/test_basic_parity.py
+++ b/tests/benchmarks/test_basic_parity.py
@@ -149,7 +149,6 @@ def vanilla_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):

 def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
     seed_everything(idx)
-    torch.backends.cudnn.deterministic = True

     model = cls_model()
     # init model parts
@@ -163,6 +162,7 @@ def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
         devices=1,
         logger=False,
         replace_sampler_ddp=False,
+        benchmark=False,
     )
     trainer.fit(model)

diff --git a/tests/benchmarks/test_sharded_parity.py b/tests/benchmarks/test_sharded_parity.py
index c80c1d2afc..78e90bd093 100644
--- a/tests/benchmarks/test_sharded_parity.py
+++ b/tests/benchmarks/test_sharded_parity.py
@@ -138,7 +138,13 @@ def plugin_parity_test(
     use_cuda = gpus > 0

     trainer = Trainer(
-        fast_dev_run=True, max_epochs=1, accelerator="gpu", devices=gpus, precision=precision, strategy="ddp_spawn"
+        fast_dev_run=True,
+        max_epochs=1,
+        accelerator="gpu",
+        devices=gpus,
+        precision=precision,
+        strategy="ddp_spawn",
+        benchmark=False,
     )

     max_memory_ddp, ddp_time = record_ddp_fit_model_stats(trainer=trainer, model=ddp_model, use_cuda=use_cuda)
@@ -154,6 +160,7 @@ def plugin_parity_test(
         devices=gpus,
         precision=precision,
         strategy="ddp_sharded_spawn",
+        benchmark=False,
     )

     assert isinstance(trainer.strategy, DDPSpawnShardedStrategy)
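
A minimal sketch (not part of the diff; all Trainer arguments other than `benchmark` are illustrative) contrasting the two ways the parity tests pin down cuDNN behaviour: the removed line toggled `torch.backends.cudnn.deterministic` by hand, while the added `benchmark=False` argument lets the Trainer set `torch.backends.cudnn.benchmark`, which per the Lightning docs is what that flag maps to.

```python
import torch
from pytorch_lightning import Trainer

# Old style (removed by this diff): the test mutated the global cuDNN flag itself.
torch.backends.cudnn.deterministic = True

# New style: pass benchmark=False so the Trainer keeps cuDNN autotuning
# (torch.backends.cudnn.benchmark) disabled for the timing/memory comparison.
# max_epochs and logger here are illustrative, not taken from the tests.
trainer = Trainer(max_epochs=1, logger=False, benchmark=False)
```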