2020-08-20 02:03:22 +00:00
|
|
|
# Copyright The PyTorch Lightning team.
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2019-07-03 20:43:05 +00:00
|
|
|
import itertools
|
2019-10-22 08:32:40 +00:00
|
|
|
import threading
|
2020-08-24 08:59:47 +00:00
|
|
|
from collections.abc import Mapping, Iterable
|
2020-08-07 22:33:51 +00:00
|
|
|
from itertools import chain
|
2019-06-25 23:42:15 +00:00
|
|
|
|
2019-06-25 23:52:26 +00:00
|
|
|
import torch
|
|
|
|
from torch.cuda._utils import _get_device_index
|
2019-10-22 08:32:40 +00:00
|
|
|
from torch.nn import DataParallel
|
|
|
|
from torch.nn.parallel import DistributedDataParallel
|
2020-08-07 07:18:29 +00:00
|
|
|
from torch.nn.parallel._functions import Gather
|
2020-08-07 22:33:51 +00:00
|
|
|
|
2020-07-20 23:00:20 +00:00
|
|
|
from pytorch_lightning.core.step_result import Result
|
2020-09-23 04:19:46 +00:00
|
|
|
from pytorch_lightning.utilities.warning_utils import WarningCache
|
2019-06-25 23:52:26 +00:00
|
|
|
|
|
|
|
|
2020-03-19 13:14:29 +00:00
|
|
|
def _find_tensors(obj): # pragma: no-cover
|
2019-07-03 20:43:05 +00:00
|
|
|
r"""
|
|
|
|
Recursively find all tensors contained in the specified object.
|
|
|
|
"""
|
|
|
|
if isinstance(obj, torch.Tensor):
|
|
|
|
return [obj]
|
|
|
|
if isinstance(obj, (list, tuple)):
|
|
|
|
return itertools.chain(*map(_find_tensors, obj))
|
|
|
|
if isinstance(obj, dict):
|
|
|
|
return itertools.chain(*map(_find_tensors, obj.values()))
|
|
|
|
return []
|
|
|
|
|
|
|
|
|
2020-03-19 13:14:29 +00:00
|
|
|
def get_a_var(obj):  # pragma: no-cover
    """Return the first ``torch.Tensor`` found in ``obj``, searching
    recursively through lists, tuples and dict items; ``None`` if absent."""
    if isinstance(obj, torch.Tensor):
        return obj

    # pick the iterable of sub-objects to search, if any
    if isinstance(obj, (list, tuple)):
        candidates = (get_a_var(item) for item in obj)
    elif isinstance(obj, dict):
        candidates = (get_a_var(item) for item in obj.items())
    else:
        return None

    for candidate in candidates:
        if isinstance(candidate, torch.Tensor):
            return candidate
    return None
|
|
|
|
|
2019-06-25 23:42:15 +00:00
|
|
|
|
2020-09-23 04:19:46 +00:00
|
|
|
# Module-level cache used by warn_missing_output below; presumably
# de-duplicates repeated warnings — confirm semantics in WarningCache.
warning_cache = WarningCache()
|
|
|
|
|
|
|
|
|
2019-06-25 23:42:15 +00:00
|
|
|
class LightningDataParallel(DataParallel):
    """
    Override the forward call in lightning so it goes to training and validation step respectively
    """

    def forward(self, *inputs, **kwargs):
        """Scatter inputs and dispatch to the module's training/test/validation step.

        Mirrors ``DataParallel.forward`` but invokes ``training_step`` /
        ``test_step`` / ``validation_step`` on the wrapped LightningModule
        instead of ``module.__call__``.

        Raises:
            RuntimeError: if any parameter/buffer is not on the root device.
        """
        if not self.device_ids:
            return self.module(*inputs, **kwargs)

        # DataParallel requires all parameters and buffers on device_ids[0]
        # before scattering/replicating.
        for t in chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError("module must have its parameters and buffers "
                                   "on device {} (device_ids[0]) but found one of "
                                   "them on device: {}".format(self.src_device_obj, t.device))

        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            # single device: no replication needed, call the step directly
            # lightning
            if self.module.training:
                return self.module.training_step(*inputs[0], **kwargs[0])
            if self.module.testing:
                return self.module.test_step(*inputs[0], **kwargs[0])
            return self.module.validation_step(*inputs[0], **kwargs[0])

        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)

        if isinstance(outputs[0], Result):
            # structured Result outputs carry 'meta' info that must be
            # stripped before gathering and re-attached afterwards
            outputs = self.__gather_structured_result(outputs)
        else:
            outputs = self.gather(outputs)
        return outputs

    def __gather_structured_result(self, outputs):
        """Gather per-replica ``Result`` objects into a single ``Result``."""
        prototype_output = outputs[0]
        original_class = prototype_output.__class__
        outputs = [dict(x) for x in outputs]

        # remove all the meta info; it is taken from the first replica and
        # re-attached after gathering
        # (fix: previous code used `enumerate` but never used the index)
        meta = outputs[0]['meta']
        for output in outputs:
            del output['meta']

        outputs = self.gather(outputs)

        # pass minimize to constructor for TrainResult
        if 'minimize' in outputs:
            result = original_class(outputs['minimize'])
        else:
            result = original_class()

        result.update(outputs)
        result['meta'] = meta
        return result

    def gather(self, outputs):
        r"""
        Override the gather method to support python scalars as well.
        """
        def gather_map(outputs):
            elem = outputs[0]
            elem_type = type(elem)

            if isinstance(elem, torch.Tensor):
                return Gather.apply(self.output_device, self.dim, *outputs)

            if elem is None:
                return None

            if isinstance(elem, Mapping):
                if not all((len(elem) == len(d) for d in outputs)):
                    raise ValueError('All dicts must have the same number of keys')
                return elem_type(((k, gather_map([d[k] for d in outputs]))
                                  for k in elem))

            if isinstance(elem, Iterable) and not isinstance(elem, str):
                return elem_type(map(gather_map, zip(*outputs)))

            # python scalars (and any other leaf): keep the per-replica list
            return outputs

        # Recursive function calls like this create reference cycles.
        # Setting the function to None clears the refcycle.
        try:
            res = gather_map(outputs)
        finally:
            gather_map = None
        return res

    def parallel_apply(self, replicas, inputs, kwargs):
        # use the lightning-aware parallel_apply defined in this module
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
|
|
|
|
|
2019-07-03 20:44:18 +00:00
|
|
|
|
|
|
|
class LightningDistributedDataParallel(DistributedDataParallel):
    """
    Override the forward call in lightning so it goes to training and validation step respectively
    """

    def parallel_apply(self, replicas, inputs, kwargs):
        # use the lightning-aware parallel_apply defined in this module
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def forward(self, *inputs, **kwargs):  # pragma: no-cover
        """Sync params, scatter inputs, and dispatch to the module's
        training/test/validation step instead of ``module.__call__``.

        Also registers output tensors with the DDP reducer (when grad is
        enabled) and warns if the step function returned ``None``.
        """
        self._sync_params()
        fx_called: str = ''

        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                # --------------
                # LIGHTNING MOD
                # --------------
                # normal
                # output = self.module(*inputs[0], **kwargs[0])
                # lightning
                if self.module.training:
                    output = self.module.training_step(*inputs[0], **kwargs[0])
                    fx_called = 'training_step'
                elif self.module.testing:
                    output = self.module.test_step(*inputs[0], **kwargs[0])
                    fx_called = 'test_step'
                else:
                    output = self.module.validation_step(*inputs[0], **kwargs[0])
                    fx_called = 'validation_step'
            else:
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            # output = self.module(*inputs, **kwargs)
            # normal lightning (ddp_cpu)
            if self.module.training:
                output = self.module.training_step(*inputs, **kwargs)
                fx_called = 'training_step'
            elif self.module.testing:
                output = self.module.test_step(*inputs, **kwargs)
                fx_called = 'test_step'
            else:
                output = self.module.validation_step(*inputs, **kwargs)
                fx_called = 'validation_step'

        if torch.is_grad_enabled():
            # We'll return the output object verbatim since it is a freeform
            # object. We need to find any tensors in this object, though,
            # because we need to figure out which parameters were used during
            # this forward pass, to ensure we short circuit reduction for any
            # unused parameters. Only if `find_unused_parameters` is set.
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])

        if output is None:
            # fix: warn_missing_output expects the step-function name
            # ('training_step' etc.); previously it was passed a formatted
            # message, so the warning could never fire.
            warn_missing_output(fx_called)
        return output
|
|
|
|
|
2019-06-25 23:52:26 +00:00
|
|
|
|
2020-09-23 04:19:46 +00:00
|
|
|
def warn_missing_output(fx_called):
    """Emit a (cached) warning that the named step function returned ``None``."""
    if fx_called == 'training_step':
        message = ("Your training_step returned None. You should instead do:\n"
                   "`return loss`\n or\n `return TrainResult`")
    elif fx_called in ('validation_step', 'test_step'):
        message = f"Your {fx_called} returned None. You should instead do:\n `return EvalResult"
    else:
        # unknown/empty step name: nothing to report
        return
    warning_cache.warn(message)
|
2020-09-23 04:19:46 +00:00
|
|
|
|
|
|
|
|
2020-03-19 13:14:29 +00:00
|
|
|
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):  # pragma: no-cover
    r"""Applies each `module` in :attr:`modules` in parallel on arguments
    contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
    on each of :attr:`devices`.

    Lightning modification of torch's ``parallel_apply``: each replica is
    dispatched to ``training_step`` / ``test_step`` / ``validation_step``
    (chosen from the replica's ``training`` / ``testing`` flags) instead of
    being called directly, a warning is issued when a step returns ``None``,
    and scalar outputs are auto-unsqueezed for DP/DDP2 gathering.

    Args:
        modules (Module): modules to be parallelized
        inputs (tensor): inputs to the modules
        devices (list of int or torch.device): CUDA devices

    :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
    :attr:`devices` (if given) should all have same length. Moreover, each
    element of :attr:`inputs` can either be a single object as the only argument
    to a module, or a collection of positional arguments.
    """
    assert len(modules) == len(inputs)
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    devices = list(map(lambda x: _get_device_index(x, True), devices))
    # lock guards `results`, which is shared by all worker threads
    lock = threading.Lock()
    results = {}
    # capture the caller's grad mode so each worker thread can replicate it
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input, kwargs, device=None):
        # Runs one replica's step on its device; stores the output (or the
        # raised exception) into results[i] under `lock`.
        torch.set_grad_enabled(grad_enabled)
        fx_called: str = ''
        if device is None:
            # infer the target device from the first tensor found in the input
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)

                module = module.to(device)

                # ---------------
                # CHANGE
                if module.training:
                    output = module.training_step(*input, **kwargs)
                    fx_called = 'training_step'
                elif module.testing:
                    output = module.test_step(*input, **kwargs)
                    fx_called = 'test_step'
                else:
                    output = module.validation_step(*input, **kwargs)
                    fx_called = 'validation_step'

                if output is None:
                    warn_missing_output(fx_called)

                if output is not None and (module.use_dp or module.use_ddp2):
                    # scalar outputs need a batch dim so gather can stack them
                    auto_squeeze_dim_zeros(output)
                # ---------------

            with lock:
                results[i] = output
        except Exception as ex:
            # store the exception; it is re-raised in the collection loop below
            with lock:
                results[i] = ex

    # TODO: fix hack (maybe not a hack)
    # make sure each module knows what training state it's in...
    # fixes weird bug where copies are out of sync
    root_m = modules[0]
    for m in modules[1:]:
        m.training = root_m.training
        m.testing = root_m.testing

    if len(modules) > 1:
        threads = [threading.Thread(target=_worker,
                                    args=(i, module, input, kwargs, device))
                   for i, (module, input, kwargs, device) in
                   enumerate(zip(modules, inputs, kwargs_tup, devices))]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # single replica: run inline, no thread overhead
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])

    # collect in input order; re-raise the first captured worker exception
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, Exception):
            raise output
        outputs.append(output)
    return outputs
|
2020-04-02 15:46:20 +00:00
|
|
|
|
|
|
|
|
|
|
|
def auto_squeeze_dim_zeros(output):
    """
    In DP or DDP2 we need to unsqueeze dim 0

    A bare tensor is unsqueezed and returned; for a dict-like output every
    zero-dim tensor value is unsqueezed in place (implicitly returns None).
    :param output:
    :return:
    """
    if isinstance(output, torch.Tensor):
        return output.unsqueeze(0)

    # dict-like output: fix up scalar tensor values in place
    for key, value in output.items():
        if isinstance(value, torch.Tensor) and value.dim() == 0:
            output[key] = value.unsqueeze(0)
|