# pytorch_lightning/root_module/memory.py
import torch
import gc
import subprocess
import numpy as np
import pandas as pd
'''
Generates a summary of a model's layers and dimensionality.
'''
class ModelSummary(object):
def __init__(self, model):
'''
Generates summaries of model layers and dimensions.
'''
self.model = model
self.in_sizes = []
self.out_sizes = []
self.summarize()
def __str__(self):
return self.summary.__str__()
def __repr__(self):
return self.summary.__str__()
def get_variable_sizes(self):
'''Run sample input through each layer to get output sizes'''
mods = list(self.model.modules())
in_sizes = []
out_sizes = []
2019-07-24 20:22:09 +00:00
input_ = self.model.example_input_array
2019-07-24 20:24:58 +00:00
if self.model.on_gpu:
input_ = input_.cuda(0)
2019-07-24 20:27:16 +00:00
if self.model.trainer.use_amp:
input_ = input_.half()
2019-07-24 20:28:55 +00:00
with torch.no_grad():
2019-07-24 20:27:16 +00:00
for i in range(1, len(mods)):
m = mods[i]
if type(input_) is list or type(input_) is tuple:
out = m(*input_)
else:
out = m(input_)
if type(input_) is tuple or type(input_) is list:
in_size = []
for x in input_:
if type(x) is list:
in_size.append(len(x))
else:
in_size.append(x.size())
else:
in_size = np.array(input_.size())
in_sizes.append(in_size)
if type(out) is tuple or type(out) is list:
out_size = np.asarray([x.size() for x in out])
else:
out_size = np.array(out.size())
out_sizes.append(out_size)
input_ = out
2019-03-31 01:45:16 +00:00
self.in_sizes = in_sizes
self.out_sizes = out_sizes
return
def get_layer_names(self):
'''Collect Layer Names'''
mods = list(self.model.named_modules())
names = []
layers = []
for m in mods[1:]:
names += [m[0]]
layers += [str(m[1].__class__)]
layer_types = [x.split('.')[-1][:-2] for x in layers]
self.layer_names = names
self.layer_types = layer_types
return
def get_parameter_sizes(self):
'''Get sizes of all parameters in `model`'''
mods = list(self.model.modules())
sizes = []
for i in range(1,len(mods)):
m = mods[i]
p = list(m.parameters())
modsz = []
for j in range(len(p)):
modsz.append(np.array(p[j].size()))
sizes.append(modsz)
self.param_sizes = sizes
return
def get_parameter_nums(self):
'''Get number of parameters in each layer'''
param_nums = []
for mod in self.param_sizes:
all_params = 0
for p in mod:
all_params += np.prod(p)
param_nums.append(all_params)
self.param_nums = param_nums
return
def make_summary(self):
'''
Makes a summary listing with:
Layer Name, Layer Type, Input Size, Output Size, Number of Parameters
'''
2019-07-24 20:30:27 +00:00
cols = ['Name', 'Type', 'Params']
if self.model.example_input_array is not None:
cols.extend(['In_sizes', 'Out_sizes'])
2019-07-24 20:31:55 +00:00
df = pd.DataFrame(np.zeros( (len(self.layer_names), len(cols))))
2019-07-24 20:30:27 +00:00
df.columns = cols
2019-03-31 01:45:16 +00:00
df['Name'] = self.layer_names
df['Type'] = self.layer_types
df['Params'] = self.param_nums
2019-07-24 20:19:19 +00:00
2019-07-24 20:23:30 +00:00
if self.model.example_input_array is not None:
2019-07-24 20:30:27 +00:00
2019-07-24 20:19:19 +00:00
df['In_sizes'] = self.in_sizes
df['Out_sizes'] = self.out_sizes
2019-03-31 01:45:16 +00:00
self.summary = df
return
def summarize(self):
self.get_layer_names()
self.get_parameter_sizes()
self.get_parameter_nums()
2019-07-24 20:19:19 +00:00
2019-07-24 20:23:30 +00:00
if self.model.example_input_array is not None:
2019-07-24 20:19:19 +00:00
self.get_variable_sizes()
2019-03-31 01:45:16 +00:00
self.make_summary()
2019-07-24 21:47:51 +00:00
def print_mem_stack():  # pragma: no cover
    '''Print the type and size of every tensor currently tracked by gc.

    Best-effort debugging helper: objects that raise on inspection are
    silently skipped.
    '''
    for candidate in gc.get_objects():
        try:
            holds_tensor = torch.is_tensor(candidate) or (
                hasattr(candidate, 'data') and torch.is_tensor(candidate.data))
            if holds_tensor:
                print(type(candidate), candidate.size())
        except Exception:
            # gc can hand back half-initialized objects; ignore them
            pass
def count_mem_items():  # pragma: no cover
    '''Count gc-tracked objects that hold tensor data.

    Returns a ``(nb_params, nb_tensors)`` tuple; an object whose class
    name contains ``'parameter'`` is counted as a param, anything else
    holding a tensor as a plain tensor.  Inspection errors are ignored.
    '''
    nb_params, nb_tensors = 0, 0
    for candidate in gc.get_objects():
        try:
            holds_tensor = torch.is_tensor(candidate) or (
                hasattr(candidate, 'data') and torch.is_tensor(candidate.data))
            if holds_tensor:
                if 'parameter' in str(type(candidate)):
                    nb_params += 1
                else:
                    nb_tensors += 1
        except Exception:
            # gc can hand back half-initialized objects; ignore them
            pass
    return nb_params, nb_tensors
def get_gpu_memory_map():
    """Get the current gpu usage via ``nvidia-smi``.

    Returns
    -------
    usage: dict
        Keys are strings of the form ``'gpu_<id>'`` (one per visible GPU,
        in nvidia-smi order).
        Values are memory usage as integers in MB.

    Raises
    ------
    Propagates ``FileNotFoundError`` / ``CalledProcessError`` when
    ``nvidia-smi`` is unavailable or fails.
    """
    result = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.used',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    # one line of output per visible GPU, each a bare integer (MB)
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    return {f'gpu_{idx}': mem for idx, mem in enumerate(gpu_memory)}