docs: fix `y` var for compute loss (#19465)

* Update train_model_basic.rst
  Loss should be calculated by comparing `x_hat` to `y`, not `x`
* Update evaluation_basic.rst
  Error in the documentation where training loss is calculated as `F.mse_loss(x_hat, x)`. Changed to `F.mse_loss(x_hat, y)`
* Update notebooks.rst
  Doc fix
* Update README.md
  Doc fix
* Update introduction.rst
  Doc fix
* Update train.py
  Doc fix
* Update README.md
  Doc fix
* Revert "Update train.py"
  This reverts commit 1934760056.
* Revert "Update introduction.rst"
  This reverts commit 0cec613c07.
* Revert "Update README.md"
  This reverts commit fda3fbf32d.
* Revert "Update notebooks.rst"
  This reverts commit 183a2a9e33.
* Revert "Update evaluation_basic.rst"
  This reverts commit 0eefe39aea.
* Revert "Update train_model_basic.rst"
  This reverts commit b98e1d2414.
* Remove the variable `y` when unused in docs
* Update README.md

---------

Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com>
commit 88869ad482
parent fe535970a9
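The squashed history above converges on one pattern: the earlier attempts to change the loss to `F.mse_loss(x_hat, y)` were all reverted, because an autoencoder's reconstruction target is the input itself; the actual fix is to discard the unused label at unpacking time (`x, _ = batch`) and leave the loss comparing `x_hat` to `x`. A minimal sketch of how the corrected example reads, assuming the encoder/decoder wiring and the Adam optimizer from the docs' autoencoder example (neither appears in the hunks below):

import torch
import torch.nn.functional as F
import lightning as L


class LitAutoEncoder(L.LightningModule):
    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def training_step(self, batch, batch_idx):
        # the label is unused by an autoencoder, hence `x, _ = batch`
        x, _ = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        # reconstruction loss compares x_hat to the input x, not to any label y
        loss = F.mse_loss(x_hat, x)
        return loss

    def configure_optimizers(self):
        # standard optimizer setup, assumed from the docs example
        return torch.optim.Adam(self.parameters(), lr=1e-3)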
@@ -146,7 +146,7 @@ class LitAutoEncoder(L.LightningModule):
 
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop. It is independent of forward
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
@@ -25,7 +25,7 @@ class LitAutoEncoder(pl.LightningModule):
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop.
         # It is independent of forward
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
@@ -45,7 +45,7 @@ To add a test loop, implement the **test_step** method of the LightningModule
 
     def test_step(self, batch, batch_idx):
         # this is the test loop
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
@@ -105,7 +105,7 @@ To add a validation loop, implement the **validation_step** method of the LightningModule
 
     def validation_step(self, batch, batch_idx):
         # this is the validation loop
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
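The test and validation hunks above receive the same one-line change. In the docs' evaluation example, the loss computation and logging sit just below the context lines shown; a sketch of how `test_step` reads after the change (the `F.mse_loss` and `self.log` lines come from the docs example discussed in the commit message, not from this diff's context lines):

    # inside the LitAutoEncoder sketched above
    def test_step(self, batch, batch_idx):
        # this is the test loop; the label is unused
        x, _ = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        # reconstruction loss against the input, logged for the test loop
        test_loss = F.mse_loss(x_hat, x)
        self.log("test_loss", test_loss)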
@@ -57,7 +57,7 @@ Paste the following code block into a notebook cell:
         self.decoder = decoder
 
     def training_step(self, batch, batch_idx):
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
@@ -68,7 +68,7 @@ The LightningModule is the full **recipe** that defines how your nn.Modules interact
 
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop.
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
@@ -129,7 +129,7 @@ A LightningModule enables your PyTorch nn.Module to play together in complex ways
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop.
         # it is independent of forward
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
@@ -174,7 +174,7 @@ class LitAutoEncoder(pl.LightningModule):
 
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop. It is independent of forward
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
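For completeness, a hypothetical end-to-end run of the corrected example. The `nn.Sequential` shapes mirror the docs' MNIST-sized autoencoder, while the random `TensorDataset` and the `Trainer` flags are stand-ins for illustration, not part of this change:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
import lightning as L

# placeholder modules mirroring the docs' 28*28 -> 3 -> 28*28 autoencoder
encoder = nn.Sequential(nn.Linear(28 * 28, 64), nn.ReLU(), nn.Linear(64, 3))
decoder = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, 28 * 28))

# (input, label) pairs; the label is ignored by training_step above
dataset = TensorDataset(torch.randn(256, 28 * 28), torch.zeros(256, dtype=torch.long))
train_loader = DataLoader(dataset, batch_size=32)

model = LitAutoEncoder(encoder, decoder)
trainer = L.Trainer(max_epochs=1, logger=False, enable_checkpointing=False)
trainer.fit(model, train_loader)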