added single node distdataparallel
parent ac57dac235
commit b9f581ab87
@@ -282,11 +282,12 @@ class Trainer(TrainerIO):
         # when GPU is called, spawn off a single worker for each gpu
         if self.on_gpu:
             rank = 0
-            mp.spawn(self.__dp_train, nprocs=len(self.data_parallel_device_ids), args=(rank, model))
+            self.model = model
+            mp.spawn(self.__dp_train, nprocs=len(self.data_parallel_device_ids), args=(rank))
         else:
             self.__run_pretrain_routine(model)
 
-    def __dp_train(self, gpu_nb, proc_rank, model):
+    def __dp_train(self, gpu_nb, proc_rank):
         """
         Entry point into a DP thread
         :param gpu_nb:
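Below is a minimal, self-contained sketch of the spawn pattern this hunk introduces: one worker process per visible GPU, with the starting rank passed through to each worker instead of the model object. The names dp_train and device_ids are illustrative, not from the commit. One caveat: torch.multiprocessing.spawn expects args to be a tuple, and (rank) as written in the diff is just a parenthesized int, so the sketch uses the one-element tuple form (rank,).

import torch
import torch.multiprocessing as mp


def dp_train(gpu_nb, proc_rank):
    # spawn invokes fn(i, *args), so gpu_nb is the process index
    # 0..nprocs-1 and proc_rank is the starting rank forwarded via args
    print(f"worker {gpu_nb} started with base rank {proc_rank}")


if __name__ == '__main__':
    # one worker per visible GPU; fall back to a single worker on CPU-only machines
    device_ids = list(range(torch.cuda.device_count())) or [0]
    rank = 0
    # args must be a tuple; (rank,) is the one-element tuple form
    mp.spawn(dp_train, nprocs=len(device_ids), args=(rank,))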