set dp as default backend
parent 3321e8c541
commit e86b191691
@@ -311,6 +311,7 @@ class Trainer(TrainerIO):
        # when using multi-node or DDP within a node start each module in a separate process
        if self.use_ddp:
            print('using ddp')
            # must copy only the meta of the exp so it survives pickle/unpickle when going to new process
            self.experiment = self.experiment.get_meta_copy()
            mp.spawn(self.ddp_train, nprocs=len(self.data_parallel_device_ids), args=(model, ))
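Because the DDP branch hands its arguments to freshly spawned processes, everything passed through `mp.spawn` must survive pickling, which is why the experiment is reduced to a meta copy first. Below is a minimal sketch of the same launch pattern; `train_fn`, `device_ids`, and the stand-in model are illustrative assumptions, not the Trainer's real attributes.

```python
# Minimal sketch of the mp.spawn launch used in the DDP branch above.
# train_fn, device_ids, and the dummy model are assumptions for illustration,
# not the actual Trainer API.
import torch.multiprocessing as mp


def train_fn(process_idx, model):
    # spawn passes the process index first, then the extra args;
    # everything in args must be picklable, hence the meta copy of the
    # experiment in the diff above
    print(f'training process {process_idx} started with {model!r}')


if __name__ == '__main__':
    device_ids = [0, 1]                  # assumed GPU ids
    model = {'name': 'toy-model'}        # picklable stand-in for the module
    mp.spawn(train_fn, nprocs=len(device_ids), args=(model,))
```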
@@ -318,6 +319,7 @@ class Trainer(TrainerIO):
        # 1 gpu or dp option triggers training using DP module
        # easier to avoid NCCL issues
        elif self.use_dp:
            print('using dp')
            self.dp_train(model)

        # ON CPU
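Taken together, the two hunks belong to a backend dispatch: DDP spawns one process per device, DP (the default this commit's title refers to) trains in the current process to sidestep NCCL issues, and anything else falls back to CPU. Here is a hedged sketch of that control flow; the attribute and method names are assumed from the context lines rather than copied from the class.

```python
# Sketch of the backend dispatch the two hunks sit inside. Attribute names
# (use_ddp, use_dp, data_parallel_device_ids) follow the context lines;
# the class itself is a simplified stand-in, not the real Trainer.
import torch.multiprocessing as mp


class TrainerSketch:
    def __init__(self, use_ddp=False, use_dp=True, device_ids=None):
        self.use_ddp = use_ddp
        self.use_dp = use_dp                      # dp as the default backend
        self.data_parallel_device_ids = device_ids or [0]

    def fit(self, model):
        if self.use_ddp:
            # one training process per GPU, as in the first hunk
            mp.spawn(self.ddp_train, nprocs=len(self.data_parallel_device_ids),
                     args=(model,))
        elif self.use_dp:
            # single-process DataParallel path; easier to avoid NCCL issues
            self.dp_train(model)
        else:
            # ON CPU
            self.cpu_train(model)

    # placeholder per-backend training hooks
    def ddp_train(self, process_idx, model): ...
    def dp_train(self, model): ...
    def cpu_train(self, model): ...


if __name__ == '__main__':
    TrainerSketch(use_dp=True).fit(model={'name': 'toy-model'})
```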