Specify `Trainer(benchmark=False)` in parity benchmarks (#13182)

Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
This commit is contained in:
Akihiro Nitta 2022-06-01 06:23:21 +09:00 committed by GitHub
parent f4f14bb5d8
commit a21e6c3f33
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 9 additions and 2 deletions

View File

@@ -149,7 +149,6 @@ def vanilla_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
seed_everything(idx)
torch.backends.cudnn.deterministic = True
model = cls_model()
# init model parts
@@ -163,6 +162,7 @@ def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
devices=1,
logger=False,
replace_sampler_ddp=False,
benchmark=False,
)
trainer.fit(model)

View File

@@ -138,7 +138,13 @@ def plugin_parity_test(
use_cuda = gpus > 0
trainer = Trainer(
fast_dev_run=True, max_epochs=1, accelerator="gpu", devices=gpus, precision=precision, strategy="ddp_spawn"
fast_dev_run=True,
max_epochs=1,
accelerator="gpu",
devices=gpus,
precision=precision,
strategy="ddp_spawn",
benchmark=False,
)
max_memory_ddp, ddp_time = record_ddp_fit_model_stats(trainer=trainer, model=ddp_model, use_cuda=use_cuda)
@@ -154,6 +160,7 @@ def plugin_parity_test(
devices=gpus,
precision=precision,
strategy="ddp_sharded_spawn",
benchmark=False,
)
assert isinstance(trainer.strategy, DDPSpawnShardedStrategy)