From bb7356bcaa91af6df73a16822603ec97834a2f2e Mon Sep 17 00:00:00 2001
From: William Falcon
Date: Thu, 5 Mar 2020 18:58:23 -0500
Subject: [PATCH] Docs12 (#1057)

* docs

* docs
---
 pytorch_lightning/core/__init__.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pytorch_lightning/core/__init__.py b/pytorch_lightning/core/__init__.py
index 1225e489a9..8cf7e9ade8 100644
--- a/pytorch_lightning/core/__init__.py
+++ b/pytorch_lightning/core/__init__.py
@@ -9,7 +9,7 @@ Notice a few things.
     1. It's the SAME code.
     2. The PyTorch code IS NOT abstracted - just organized.
-    3. All the other code that didn't go in the LightningModule has been automated for you by the trainer
+    3. All the other code that's not in the LightningModule has been automated for you by the trainer
 
 .. code-block:: python
 
     net = Net()
@@ -101,7 +101,7 @@ Which you can train by doing:
 
     trainer.fit(model)
 
----
+----------
 
 Training loop structure
 -----------------------
@@ -181,7 +181,7 @@ don't run your test data by accident. Instead you have to explicitly call:
     trainer = Trainer()
     trainer.test(model)
 
----
+----------
 
 Training_step_end method
 ------------------------
@@ -211,7 +211,7 @@ which allows you to operate on the pieces of the batch
         # like calculate validation set accuracy or loss
         training_epoch_end(val_outs)
 
----
+----------
 
 Remove cuda calls
 -----------------
@@ -230,7 +230,7 @@ When you init a new tensor in your code, just use type_as
     z = sample_noise()
     z = z.type_as(x.type())
 
----
+----------
 
 Data preparation
 ----------------
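
A rough, standalone sketch of the device-agnostic idiom the patched docstring refers to ("just use type_as" instead of calling .cuda() on a freshly created tensor). The sample_noise and training_step_sketch names below are illustrative only, not Lightning API; note that torch.Tensor.type_as expects a tensor argument, so the tensor itself is passed rather than its type string.

import torch


def sample_noise(n: int, dim: int) -> torch.Tensor:
    # Fresh tensor, created on the CPU with the default dtype.
    return torch.randn(n, dim)


def training_step_sketch(x: torch.Tensor) -> torch.Tensor:
    z = sample_noise(x.size(0), 16)
    # Match the new tensor to an existing input: z now lives on the same
    # device and has the same dtype as x, with no explicit .cuda() call.
    z = z.type_as(x)
    return torch.cat([x, z], dim=1)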