docs: fix `y` var for computing loss (#19465)

* Update train_model_basic.rst

Loss should be calculated by comparing `x_hat` to `y`, not `x`

* Update evaluation_basic.rst

Fixed an error in the documentation where the training loss was calculated as `F.mse_loss(x_hat, x)`; changed to `F.mse_loss(x_hat, y)`

* Update notebooks.rst

Doc fix

* Update README.md

Doc fix

* Update introduction.rst

Doc fix

* Update train.py

Doc fix

* Update README.md

Doc fix

* Revert "Update train.py"

This reverts commit 1934760056.

* Revert "Update introduction.rst"

This reverts commit 0cec613c07.

* Revert "Update README.md"

This reverts commit fda3fbf32d.

* Revert "Update notebooks.rst"

This reverts commit 183a2a9e33.

* Revert "Update evaluation_basic.rst"

This reverts commit 0eefe39aea.

* Revert "Update train_model_basic.rst"

This reverts commit b98e1d2414.

* remove the variable `y` where it is unused in docs: the autoencoder's loss `F.mse_loss(x_hat, x)` was correct as written, since an autoencoder reconstructs its input, so the real fix is to unpack the batch as `x, _ = batch` rather than change the loss target

* Update README.md

---------

Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com>
Karthik Venkataramani authored 2024-03-15 14:56:28 -07:00, committed by GitHub
commit 88869ad482 (parent fe535970a9)
7 changed files with 8 additions and 8 deletions
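
All seven diffs below make the same one-line change: the unused label is unpacked into `_`, while the reconstruction loss keeps comparing `x_hat` to the input `x`. As a point of reference, here is a minimal runnable sketch of the corrected `training_step` in context (the `nn.Sequential` encoder and decoder are stand-ins for whatever modules each doc defines):

```python
import torch
import torch.nn.functional as F
from torch import nn
import lightning as L


class LitAutoEncoder(L.LightningModule):
    def __init__(self):
        super().__init__()
        # stand-in modules; each doc defines its own encoder/decoder
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 64), nn.ReLU(), nn.Linear(64, 3))
        self.decoder = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, 28 * 28))

    def training_step(self, batch, batch_idx):
        # the label is unused: an autoencoder reconstructs its input
        x, _ = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        # the loss compares the reconstruction to the input, not to the label
        loss = F.mse_loss(x_hat, x)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)
```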


@@ -146,7 +146,7 @@ class LitAutoEncoder(L.LightningModule):
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop. It is independent of forward
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)


@@ -25,7 +25,7 @@ class LitAutoEncoder(pl.LightningModule):
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop.
         # It is independent of forward
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)


@@ -45,7 +45,7 @@ To add a test loop, implement the **test_step** method of the LightningModule
     def test_step(self, batch, batch_idx):
         # this is the test loop
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
@@ -105,7 +105,7 @@ To add a validation loop, implement the **validation_step** method of the Lightn
     def validation_step(self, batch, batch_idx):
         # this is the validation loop
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)


@@ -57,7 +57,7 @@ Paste the following code block into a notebook cell:
         self.decoder = decoder

     def training_step(self, batch, batch_idx):
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)


@@ -68,7 +68,7 @@ The LightningModule is the full **recipe** that defines how your nn.Modules inte
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop.
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)


@@ -129,7 +129,7 @@ A LightningModule enables your PyTorch nn.Module to play together in complex way
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop.
         # it is independent of forward
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)


@@ -174,7 +174,7 @@ class LitAutoEncoder(pl.LightningModule):
     def training_step(self, batch, batch_idx):
         # training_step defines the train loop. It is independent of forward
-        x, y = batch
+        x, _ = batch
         x = x.view(x.size(0), -1)
         z = self.encoder(x)
         x_hat = self.decoder(z)
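
The evaluation hooks touched in `evaluation_basic.rst` follow the same pattern as `training_step`. A sketch of the corrected validation and test loops, assuming the `LitAutoEncoder` and imports from the snippet above:

```python
class LitAutoEncoderWithEval(LitAutoEncoder):
    # extends the sketch above with the evaluation hooks changed in this PR

    def validation_step(self, batch, batch_idx):
        # this is the validation loop; the label is still unused
        x, _ = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        val_loss = F.mse_loss(x_hat, x)
        self.log("val_loss", val_loss)

    def test_step(self, batch, batch_idx):
        # this is the test loop
        x, _ = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        test_loss = F.mse_loss(x_hat, x)
        self.log("test_loss", test_loss)
```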