From a78709751a6c1ced1a6b2abf45b94e6d4db75b66 Mon Sep 17 00:00:00 2001
From: Wei Ji <23487320+weiji14@users.noreply.github.com>
Date: Sat, 31 Jul 2021 01:56:17 +1200
Subject: [PATCH] Reverse width, height to height, width in docs (#8612)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Carlos Mocholí
---
 docs/source/starter/introduction_guide.rst | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/source/starter/introduction_guide.rst b/docs/source/starter/introduction_guide.rst
index 8e806bfd16..500c04c03e 100644
--- a/docs/source/starter/introduction_guide.rst
+++ b/docs/source/starter/introduction_guide.rst
@@ -79,13 +79,13 @@ Let's first start with the model. In this case, we'll design a 3-layer neural ne
         def __init__(self):
             super().__init__()
 
-            # mnist images are (1, 28, 28) (channels, width, height)
+            # mnist images are (1, 28, 28) (channels, height, width)
             self.layer_1 = nn.Linear(28 * 28, 128)
             self.layer_2 = nn.Linear(128, 256)
             self.layer_3 = nn.Linear(256, 10)
 
         def forward(self, x):
-            batch_size, channels, width, height = x.size()
+            batch_size, channels, height, width = x.size()
 
             # (b, 1, 28, 28) -> (b, 1*28*28)
             x = x.view(batch_size, -1)
@@ -415,7 +415,7 @@ For clarity, we'll recall that the full LightningModule now looks like this.
             self.layer_3 = nn.Linear(256, 10)
 
         def forward(self, x):
-            batch_size, channels, width, height = x.size()
+            batch_size, channels, height, width = x.size()
             x = x.view(batch_size, -1)
             x = self.layer_1(x)
             x = F.relu(x)
@@ -794,7 +794,7 @@ within it.
 
     class MNISTClassifier(LightningModule):
         def forward(self, x):
-            batch_size, channels, width, height = x.size()
+            batch_size, channels, height, width = x.size()
             x = x.view(batch_size, -1)
             x = self.layer_1(x)
             x = F.relu(x)
@@ -822,7 +822,7 @@ In this case, we've set this LightningModel to predict logits. But we could also
 
     class MNISTRepresentator(LightningModule):
         def forward(self, x):
-            batch_size, channels, width, height = x.size()
+            batch_size, channels, height, width = x.size()
             x = x.view(batch_size, -1)
             x = self.layer_1(x)
             x1 = F.relu(x)
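
Note: a quick sanity check of the ordering this patch documents. PyTorch image
batches use the NCHW layout, i.e. (batch_size, channels, height, width), so for
a non-square tensor the third dimension is the height and the fourth is the
width. A minimal sketch (assumes only that torch is installed; the batch below
is synthetic rather than loaded from MNIST, with distinct H and W so the
ordering is observable):

    import torch

    # PyTorch layers that consume images expect NCHW:
    # (batch_size, channels, height, width).
    x = torch.zeros(4, 1, 32, 28)  # hypothetical batch: height=32, width=28

    batch_size, channels, height, width = x.size()
    assert (height, width) == (32, 28)

    # Flattening for an nn.Linear layer, as the guide does:
    x = x.view(batch_size, -1)
    assert x.shape == (4, 1 * 32 * 28)

For the square 28x28 MNIST images the swap has no runtime effect, which is why
only the docs needed fixing; the unpack order just mislabeled the variables.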