From ad24bef1c976a2a772b47584b24ae78eaa6e3604 Mon Sep 17 00:00:00 2001
From: William Falcon
Date: Sun, 14 Jul 2019 18:12:41 -0400
Subject: [PATCH] removed print statements

---
 pytorch_lightning/models/trainer.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/pytorch_lightning/models/trainer.py b/pytorch_lightning/models/trainer.py
index a2561fc70a..d04d731e8f 100644
--- a/pytorch_lightning/models/trainer.py
+++ b/pytorch_lightning/models/trainer.py
@@ -82,7 +82,6 @@ class Trainer(TrainerIO):
         self.print_nan_grads = print_nan_grads
         self.data_parallel_device_ids = None
         self.world_size = 1
-        print('-'*100)
 
         # gpus come in as a string.
         # if gpus = -1 then use all available devices
@@ -297,17 +296,14 @@ class Trainer(TrainerIO):
         # when using gpus, first thing we do is spawn a new process between each worker
         # multi-gpu and multi-nodes
         if self.data_parallel:
-            print('DP train')
             self.experiment = self.experiment.get_meta_copy()
             mp.spawn(self.dp_train, nprocs=len(self.data_parallel_device_ids), args=(model, ))
 
         # treat 1 gpu as a different case to avoid nccl bugs
         elif len(self.data_parallel_device_ids) == 1:
-            print('1 gpu train')
             self.single_gpu_train(model)
 
         else:
-            print('NO GPU')
             # CHOOSE OPTIMIZER
             # filter out the weights that were done on gpu so we can load on good old cpus
             self.optimizers = model.configure_optimizers()
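
For context on the second hunk: a minimal, self-contained sketch of the torch.multiprocessing spawn pattern that the `mp.spawn(self.dp_train, ...)` call above relies on. The `dp_train` worker and `nprocs` choice below are illustrative stand-ins, not the Trainer's actual method or configuration.

import torch
import torch.multiprocessing as mp


def dp_train(process_idx, model):
    # mp.spawn passes the worker index (0..nprocs-1) as the first argument;
    # each spawned process would pin itself to one GPU via this index.
    device = torch.device(f'cuda:{process_idx}') if torch.cuda.is_available() else torch.device('cpu')
    model = model.to(device)
    # ... the per-process training loop would run here ...


if __name__ == '__main__':
    model = torch.nn.Linear(4, 2)
    nprocs = torch.cuda.device_count() or 1  # fall back to one CPU worker
    # spawn() forks nprocs workers, passes (model,) after the implicit
    # index argument, and joins them before returning.
    mp.spawn(dp_train, nprocs=nprocs, args=(model,))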