diff --git a/docs/source/multi_gpu.rst b/docs/source/multi_gpu.rst
index b31490e000..706a977290 100644
--- a/docs/source/multi_gpu.rst
+++ b/docs/source/multi_gpu.rst
@@ -67,7 +67,7 @@ register the tensor as a buffer in your modules's `__init__` method with :meth:`
 Remove samplers
 ^^^^^^^^^^^^^^^
 
-In PyTorch, you must use `torch.nn.DistributedSampler` for multi-node or TPU training in PyTorch. The
+In PyTorch, you must use `torch.nn.DistributedSampler` for multi-node or TPU training. The
 sampler makes sure each GPU sees the appropriate part of your data.
 
 .. testcode::
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index d11c8ed9e7..d23cde63f4 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -1670,7 +1670,7 @@ class LightningModule(ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, Mod
             >>> class ManuallyArgsModel(LightningModule):
             ...     def __init__(self, arg1, arg2, arg3):
             ...         super().__init__()
-            ...         # manually assine arguments
+            ...         # manually assign arguments
             ...         self.save_hyperparameters('arg1', 'arg3')
             ...     def forward(self, *args, **kwargs):
             ...         ...
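
For context on the doc line touched above, here is a minimal sketch of the manual sampler wiring that the "Remove samplers" section says Lightning handles for you. It assumes the sampler class at its actual location, `torch.utils.data.distributed.DistributedSampler`; the toy dataset and the hard-coded `num_replicas`/`rank` values are placeholders for illustration (in real multi-node training they come from the initialized process group):

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from torch.utils.data.distributed import DistributedSampler

    # Toy dataset standing in for the user's real one (placeholder shapes/values).
    dataset = TensorDataset(torch.randn(64, 3), torch.randint(0, 2, (64,)))

    # In plain PyTorch each process builds its own sampler so every GPU sees
    # a distinct shard of the data; Lightning adds this sampler automatically.
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0)
    loader = DataLoader(dataset, batch_size=8, sampler=sampler)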
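
For the docstring fixed in `lightning.py`, a short usage sketch of the pattern the corrected comment describes: `save_hyperparameters('arg1', 'arg3')` stores only the named constructor arguments, so `self.hparams` ends up holding `arg1` and `arg3` but not `arg2`. The class name and argument values below are placeholders, assuming the doctest-style example from the surrounding docstring:

    >>> from pytorch_lightning import LightningModule
    >>> class ManuallyArgsModel(LightningModule):
    ...     def __init__(self, arg1, arg2, arg3):
    ...         super().__init__()
    ...         # manually assign only the named arguments
    ...         self.save_hyperparameters('arg1', 'arg3')
    ...     def forward(self, *args, **kwargs):
    ...         ...
    >>> model = ManuallyArgsModel(1, 'abc', 3.14)
    >>> sorted(model.hparams.keys())  # arg2 was skipped
    ['arg1', 'arg3']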