From 2232eb35d1a31d72e5cdc41751b30e621ab207ee Mon Sep 17 00:00:00 2001 From: Ibraheem Moosa Date: Sat, 14 Mar 2020 23:02:31 +0600 Subject: [PATCH] Fix examples that use type_as (#1129) --- docs/source/multi_gpu.rst | 2 +- pytorch_lightning/core/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/multi_gpu.rst b/docs/source/multi_gpu.rst index fdd5a92b46..baf99d87d1 100644 --- a/docs/source/multi_gpu.rst +++ b/docs/source/multi_gpu.rst @@ -38,7 +38,7 @@ This will make your code scale to any arbitrary number of GPUs or TPUs with Ligh # with lightning def forward(self, x): z = torch.Tensor(2, 3) - z = z.type_as(x.type()) + z = z.type_as(x) Remove samplers ^^^^^^^^^^^^^^^ diff --git a/pytorch_lightning/core/__init__.py b/pytorch_lightning/core/__init__.py index fac76ffd61..9cd75606b5 100644 --- a/pytorch_lightning/core/__init__.py +++ b/pytorch_lightning/core/__init__.py @@ -228,7 +228,7 @@ When you init a new tensor in your code, just use type_as # put the z on the appropriate gpu or tpu core z = sample_noise() - z = z.type_as(x.type()) + z = z.type_as(x) ----------