diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index 8d143cfdc1..ad7f0002b1 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -49,4 +49,4 @@ jobs:
           pip list
 
       - name: Check typing
-        run: mypy --no-warn-unused-ignores
+        run: mypy
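
Dropping `--no-warn-unused-ignores` lets the `warn_unused_ignores` setting from the project's mypy configuration take effect again (assuming the config enables it), so the type checker now flags `# type: ignore` comments that no longer suppress anything; that is why the stale ignores further down in this diff are deleted. A minimal sketch of the behaviour, using a hypothetical module that is not part of this change:

    # demo.py -- hypothetical example. With warn_unused_ignores in effect, mypy
    # reports the first comment as 'Unused "type: ignore" comment' because the
    # line already type-checks; the second ignore suppresses a real error and stays.
    from typing import Optional

    x: Optional[int] = None  # type: ignore[assignment]  # flagged as unused
    y: int = "not an int"  # type: ignore[assignment]  # still needed
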
diff --git a/pyproject.toml b/pyproject.toml
index 43ef2fc019..7fcb08a439 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -127,17 +127,10 @@ files = [
 ]
 # This section is for folders with "-" as they are not valid python modules
 exclude = [
-    "src/lightning_app/__about__.py",
-    "src/lightning_app/__setup__.py",
-    "src/lightning_app/__version__.py",
-    "src/lightning_fabric/__about__.py",
-    "src/lightning_fabric/__setup__.py",
-    "src/lightning_fabric/__version__.py",
     "src/lightning/app/cli/app-template",
     "src/lightning/app/cli/component-template",
     "src/lightning/app/cli/pl-app-template",
     "src/lightning/app/cli/react-ui-template",
-    "src/lightning/app/launcher",
 ]
 install_types = "True"
 non_interactive = "True"
@@ -192,6 +185,9 @@ module = [
     "lightning.app.frontend.stream_lit",
     "lightning.app.frontend.utils",
     "lightning.app.frontend.web",
+    "lightning.app.launcher.launcher",
+    "lightning.app.launcher.lightning_backend",
+    "lightning.app.launcher.lightning_hybrid_backend",
     "lightning.app.pdb.pdb",
     "lightning.app.runners.backends.backend",
     "lightning.app.runners.backends.cloud",
@@ -240,9 +236,6 @@ module = [
     "lightning.app.utilities.state",
     "lightning.app.utilities.tracer",
     "lightning.app.utilities.tree",
-    "lightning.store.authentication",
-    "lightning.store.cloud_api",
-    "lightning.store.save",
     "lightning.store.utils",
 ]
 ignore_errors = "True"
diff --git a/requirements/data/data.txt b/requirements/data/data.txt
index 4813af9523..40d3f24d9a 100644
--- a/requirements/data/data.txt
+++ b/requirements/data/data.txt
@@ -5,4 +5,4 @@ lightning-utilities >=0.8.0, <0.10.0
 # to be able to include also 0.6 and preserve `>` needed for CI min version bypass
 torchdata >0.5.9, <=0.7.0
 # to be able to include also PL 2.0 and preserve `>` needed for CI min version bypass
-torch >0.14.0, <=2.1.0
+torch >0.14.0, <2.2.0
diff --git a/requirements/fabric/examples.txt b/requirements/fabric/examples.txt
index a09b12cfe6..c93942aded 100644
--- a/requirements/fabric/examples.txt
+++ b/requirements/fabric/examples.txt
@@ -1,6 +1,6 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 #  in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
-torchvision >=0.13.0, <0.16.0
+torchvision >=0.13.0, <0.17.0
 torchmetrics >=0.10.0, <1.3.0
 lightning-utilities >=0.8.0, <0.10.0
diff --git a/requirements/pytorch/examples.txt b/requirements/pytorch/examples.txt
index 19b9fb4f15..15cf3b7288 100644
--- a/requirements/pytorch/examples.txt
+++ b/requirements/pytorch/examples.txt
@@ -1,7 +1,7 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 #  in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
-torchvision >=0.13.0, <0.16.0
+torchvision >=0.13.0, <0.17.0
 gym[classic_control] >=0.17.0, <0.27.0
 ipython[all] <8.15.0
 torchmetrics >=0.10.0, <1.3.0
diff --git a/requirements/typing.txt b/requirements/typing.txt
index dbcc5328da..33fdbc2bd8 100644
--- a/requirements/typing.txt
+++ b/requirements/typing.txt
@@ -1,5 +1,5 @@
 mypy==1.5.1
-torch==2.0.1
+torch==2.1.0
 
 types-Markdown
 types-PyYAML
diff --git a/src/lightning/app/core/api.py b/src/lightning/app/core/api.py
index 0e8c34415e..11205ec7e8 100644
--- a/src/lightning/app/core/api.py
+++ b/src/lightning/app/core/api.py
@@ -196,9 +196,9 @@ if _is_starsessions_available():
 @fastapi_service.get("/api/v1/state", response_class=JSONResponse)
 async def get_state(
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Mapping:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
@@ -246,8 +246,8 @@ async def get_layout() -> str:
 @fastapi_service.get("/api/v1/spec", response_class=JSONResponse)
 async def get_spec(
     response: Response,
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Union[List, Dict]:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
@@ -266,9 +266,9 @@ async def get_spec(
 async def post_delta(
     request: Request,
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Optional[Dict]:
     """This endpoint is used to make an update to the app state using delta diff, mainly used by streamlit to update
     the state."""
@@ -292,9 +292,9 @@ async def post_delta(
 async def post_state(
     request: Request,
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Optional[Dict]:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
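
The ignores removed above sat on FastAPI header parameters; declaring an optional header with a `Header(None)` default is the documented FastAPI pattern, sketched here with a hypothetical endpoint that is not part of this diff:

    # Illustrative only: FastAPI maps the X-Custom-Id request header onto the
    # x_custom_id parameter and passes None when the header is absent.
    from typing import Optional

    from fastapi import FastAPI, Header

    app = FastAPI()

    @app.get("/ping")
    async def ping(x_custom_id: Optional[str] = Header(None)) -> dict:
        if x_custom_id is None:
            raise Exception("Missing X-Custom-Id header")
        return {"id": x_custom_id}
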
diff --git a/src/lightning/fabric/plugins/collectives/torch_collective.py b/src/lightning/fabric/plugins/collectives/torch_collective.py
index 50b9a49975..506462bd54 100644
--- a/src/lightning/fabric/plugins/collectives/torch_collective.py
+++ b/src/lightning/fabric/plugins/collectives/torch_collective.py
@@ -84,10 +84,10 @@ class TorchCollective(Collective):
         return output_tensor_list
 
     def send(self, tensor: Tensor, dst: int, tag: int = 0) -> None:
-        dist.send(tensor, dst, tag=tag, group=self.group)  # type: ignore[arg-type]
+        dist.send(tensor, dst, tag=tag, group=self.group)
 
     def recv(self, tensor: Tensor, src: Optional[int] = None, tag: int = 0) -> Tensor:
-        dist.recv(tensor, src, tag=tag, group=self.group)  # type: ignore[arg-type]
+        dist.recv(tensor, src, tag=tag, group=self.group)
         return tensor
 
     def all_gather_object(self, object_list: List[Any], obj: Any) -> List[Any]:
diff --git a/src/lightning/fabric/plugins/precision/fsdp.py b/src/lightning/fabric/plugins/precision/fsdp.py
index 054aa23c64..ebdafcd651 100644
--- a/src/lightning/fabric/plugins/precision/fsdp.py
+++ b/src/lightning/fabric/plugins/precision/fsdp.py
@@ -143,7 +143,7 @@ class FSDPPrecision(Precision):
         if scaler is not None:
             if _optimizer_handles_unscaling(optimizer):
                 raise NotImplementedError("Gradient clipping is not implemented for optimizers handling the unscaling.")
-            scaler.unscale_(optimizer)  # type: ignore[arg-type]  # ShardedGradScaler has wrong type annotation
+            scaler.unscale_(optimizer)
 
     def state_dict(self) -> Dict[str, Any]:
         if self.scaler is not None:
diff --git a/src/lightning/fabric/strategies/xla_fsdp.py b/src/lightning/fabric/strategies/xla_fsdp.py
index d34eb5caeb..8518536e8c 100644
--- a/src/lightning/fabric/strategies/xla_fsdp.py
+++ b/src/lightning/fabric/strategies/xla_fsdp.py
@@ -277,7 +277,7 @@ class XLAFSDPStrategy(ParallelStrategy, _Sharded):
     ) -> Tensor:
         """Clip gradients by norm."""
         self.precision.unscale_gradients(optimizer)
-        return module.clip_grad_norm_(max_norm=max_norm, norm_type=norm_type)  # type: ignore[operator]
+        return module.clip_grad_norm_(max_norm=max_norm, norm_type=norm_type)
 
     def clip_gradients_value(self, module: Module, optimizer: Optimizer, clip_val: Union[float, int]) -> None:
         """Clip gradients by value."""
diff --git a/src/lightning/fabric/utilities/types.py b/src/lightning/fabric/utilities/types.py
index cf4d35b7e8..3192b8a269 100644
--- a/src/lightning/fabric/utilities/types.py
+++ b/src/lightning/fabric/utilities/types.py
@@ -12,12 +12,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from pathlib import Path
-from typing import Any, Callable, Dict, Iterator, List, Optional, Protocol, TypeVar, Union, runtime_checkable
+from typing import (
+    Any,
+    Callable,
+    DefaultDict,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    Protocol,
+    TypeVar,
+    Union,
+    runtime_checkable,
+)
 
 import torch
 from torch import Tensor
 from torch.optim import Optimizer
-from typing_extensions import TypeAlias
+from typing_extensions import TypeAlias, overload
 
 from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_13, _TORCH_GREATER_EQUAL_2_0
 
@@ -117,7 +129,14 @@ class ReduceLROnPlateau(_Stateful[str], Protocol):
 class Steppable(Protocol):
     """To structurally type ``optimizer.step()``"""
 
-    # Inferred from `torch.optim.optimizer.pyi`
+    @overload
+    def step(self, closure: None = ...) -> None:
+        ...
+
+    @overload
+    def step(self, closure: Callable[[], float]) -> float:
+        ...
+
     def step(self, closure: Optional[Callable[[], float]] = ...) -> Optional[float]:
         ...
 
@@ -128,7 +147,7 @@ class Optimizable(Steppable, Protocol):
 
     param_groups: List[Dict[Any, Any]]
     defaults: Dict[Any, Any]
-    state: Dict[Any, Any]
+    state: DefaultDict[Tensor, Any]
 
     def state_dict(self) -> Dict[str, Dict[Any, Any]]:
         ...
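
The two `@overload` signatures on `Steppable.step` let a type checker infer `None` for a call without a closure and `float` when a closure is supplied; a small sketch of how the overloads resolve, with illustrative names that are not part of this diff:

    # Illustrative only: an object exposing this step() shape matches the protocol,
    # and the overloads drive the return-type inference at call sites.
    from typing import Callable, Optional, overload

    class _ToyOptimizer:
        @overload
        def step(self, closure: None = ...) -> None:
            ...

        @overload
        def step(self, closure: Callable[[], float]) -> float:
            ...

        def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
            return closure() if closure is not None else None

    loss = _ToyOptimizer().step(lambda: 0.5)  # inferred as float
    nothing = _ToyOptimizer().step()          # inferred as None
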
diff --git a/src/lightning/pytorch/_graveyard/_torchmetrics.py b/src/lightning/pytorch/_graveyard/_torchmetrics.py
index 6bbc66887b..82e3ad2dcf 100644
--- a/src/lightning/pytorch/_graveyard/_torchmetrics.py
+++ b/src/lightning/pytorch/_graveyard/_torchmetrics.py
@@ -1,9 +1,9 @@
-import contextlib
 from typing import Callable
 
 import torchmetrics
 from lightning_utilities.core.imports import compare_version as _compare_version
 
+from lightning.pytorch.utilities.imports import _TORCHMETRICS_GREATER_EQUAL_0_8_0
 from lightning.pytorch.utilities.migration.utils import _patch_pl_to_mirror_if_necessary
 
 
@@ -12,13 +12,9 @@ def compare_version(package: str, op: Callable, version: str, use_base_version:
     return _compare_version(new_package, op, version, use_base_version)
 
 
-# patching is necessary, since up to v.0.7.3 torchmetrics has a hardcoded reference to lightning.pytorch,
-# which has to be redirected to the unified package:
-# https://github.com/Lightning-AI/metrics/blob/v0.7.3/torchmetrics/metric.py#L96
-with contextlib.suppress(AttributeError):
-    if hasattr(torchmetrics.utilities.imports, "_compare_version"):
-        torchmetrics.utilities.imports._compare_version = compare_version
-
-with contextlib.suppress(AttributeError):
-    if hasattr(torchmetrics.metric, "_compare_version"):
-        torchmetrics.metric._compare_version = compare_version
+if not _TORCHMETRICS_GREATER_EQUAL_0_8_0:
+    # Torchmetrics versions older than 0.8.0 have a hardcoded reference to lightning.pytorch that has to be
+    # redirected to the unified package. The reference was removed upstream in
+    # https://github.com/Lightning-AI/torchmetrics/commit/b225889b34b83272117b758cbc28772a5c2356d9
+    torchmetrics.utilities.imports._compare_version = compare_version
+    torchmetrics.metric._compare_version = compare_version
diff --git a/src/lightning/pytorch/callbacks/finetuning.py b/src/lightning/pytorch/callbacks/finetuning.py
index 5bb97bcf35..91b38f12e9 100644
--- a/src/lightning/pytorch/callbacks/finetuning.py
+++ b/src/lightning/pytorch/callbacks/finetuning.py
@@ -127,7 +127,7 @@ class BaseFinetuning(Callback):
 
         if isinstance(modules, Iterable):
             _flatten_modules = []
-            for m in modules:
+            for m in modules:  # type: ignore[union-attr]
                 _flatten_modules.extend(BaseFinetuning.flatten_modules(m))
 
             _modules = iter(_flatten_modules)
diff --git a/src/lightning/pytorch/core/optimizer.py b/src/lightning/pytorch/core/optimizer.py
index 080b2008f3..81fb648c63 100644
--- a/src/lightning/pytorch/core/optimizer.py
+++ b/src/lightning/pytorch/core/optimizer.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 from contextlib import contextmanager
 from dataclasses import fields
-from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union, overload
 from weakref import proxy
 
 import torch
@@ -393,9 +393,17 @@ class _MockOptimizer(Optimizer):
     def state_dict(self) -> Dict[str, Any]:
         return {}  # Return Empty
 
-    def step(self, closure: Optional[Callable] = None) -> None:
+    @overload
+    def step(self, closure: None = ...) -> None:
+        ...
+
+    @overload
+    def step(self, closure: Callable[[], float]) -> float:
+        ...
+
+    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
         if closure is not None:
-            closure()
+            return closure()
 
     def zero_grad(self, set_to_none: Optional[bool] = True) -> None:
         pass  # Do Nothing
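
Returning the closure's result from `_MockOptimizer.step` mirrors how optimizers that evaluate a closure hand the recomputed loss back to the caller; a short sketch with `torch.optim.LBFGS`, which requires a closure (illustrative, not from this diff):

    # Illustrative only: step(closure) evaluates the closure and returns the loss
    # it produced, so callers can reuse that value without recomputing it.
    import torch

    param = torch.nn.Parameter(torch.ones(1))
    optimizer = torch.optim.LBFGS([param])

    def closure() -> torch.Tensor:
        optimizer.zero_grad()
        loss = (param ** 2).sum()
        loss.backward()
        return loss

    loss = optimizer.step(closure)  # the loss computed inside the closure
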
diff --git a/src/lightning/pytorch/demos/transformer.py b/src/lightning/pytorch/demos/transformer.py
index 9cf7e226e1..6c2ad6defc 100644
--- a/src/lightning/pytorch/demos/transformer.py
+++ b/src/lightning/pytorch/demos/transformer.py
@@ -75,10 +75,10 @@ class PositionalEncoding(nn.Module):
         self.register_parameter("pe", nn.Parameter(pe, requires_grad=False))
 
     def reset_parameters(self) -> None:
-        self.pe.copy_(self._init_pos_encoding())  # type: ignore[operator]
+        self.pe.copy_(self._init_pos_encoding())
 
     def forward(self, x: Tensor) -> Tensor:
-        x + self.pe[: x.size(0), :]  # type: ignore[index]
+        x = x + self.pe[: x.size(0), :]
         return self.dropout(x)
 
     def _init_pos_encoding(self) -> Tensor:
diff --git a/src/lightning/pytorch/overrides/distributed.py b/src/lightning/pytorch/overrides/distributed.py
index b349f320bb..7c6578006c 100644
--- a/src/lightning/pytorch/overrides/distributed.py
+++ b/src/lightning/pytorch/overrides/distributed.py
@@ -163,9 +163,7 @@ def _register_ddp_comm_hook(
 def _sync_module_states(module: torch.nn.Module) -> None:
     """Taken from https://github.com/pytorch/pytorch/blob/v2.0.0/torch/nn/parallel/distributed.py#L675-L682."""
     parameters_to_ignore = (
-        set(module._ddp_params_and_buffers_to_ignore)  # type: ignore[arg-type]
-        if hasattr(module, "_ddp_params_and_buffers_to_ignore")
-        else set()
+        set(module._ddp_params_and_buffers_to_ignore) if hasattr(module, "_ddp_params_and_buffers_to_ignore") else set()
     )
     from torch.distributed.distributed_c10d import _get_default_group
     from torch.distributed.utils import _sync_module_states as torch_sync_module_states
diff --git a/src/lightning/pytorch/utilities/compile.py b/src/lightning/pytorch/utilities/compile.py
index e7f919b4f9..0b38c4d794 100644
--- a/src/lightning/pytorch/utilities/compile.py
+++ b/src/lightning/pytorch/utilities/compile.py
@@ -88,8 +88,8 @@ def to_uncompiled(model: Union["pl.LightningModule", "torch._dynamo.OptimizedMod
     from torch._dynamo import OptimizedModule
 
     if isinstance(model, OptimizedModule):
-        model = model._orig_mod
-        if not isinstance(model, pl.LightningModule):
+        original = model._orig_mod
+        if not isinstance(original, pl.LightningModule):
             raise TypeError(
                 f"Unexpected error, the wrapped model should be a LightningModule, found {type(model).__name__}"
             )
@@ -99,20 +99,21 @@ def to_uncompiled(model: Union["pl.LightningModule", "torch._dynamo.OptimizedMod
             raise ValueError(
                 "`model` is required to be a compiled LightningModule. Found a non-compiled LightningModule instead."
             )
+        original = model
 
     else:
         raise ValueError("`model` must either be an instance of OptimizedModule or LightningModule")
 
-    ctx = model._compiler_ctx
+    ctx = original._compiler_ctx
     if ctx is not None:
-        model.forward = ctx["original_forward"]  # type: ignore[method-assign]
-        model.training_step = ctx["original_training_step"]  # type: ignore[method-assign]
-        model.validation_step = ctx["original_validation_step"]  # type: ignore[method-assign]
-        model.test_step = ctx["original_test_step"]  # type: ignore[method-assign]
-        model.predict_step = ctx["original_predict_step"]  # type: ignore[method-assign]
-        model._compiler_ctx = None
+        original.forward = ctx["original_forward"]  # type: ignore[method-assign]
+        original.training_step = ctx["original_training_step"]  # type: ignore[method-assign]
+        original.validation_step = ctx["original_validation_step"]  # type: ignore[method-assign]
+        original.test_step = ctx["original_test_step"]  # type: ignore[method-assign]
+        original.predict_step = ctx["original_predict_step"]  # type: ignore[method-assign]
+        original._compiler_ctx = None
 
-    return model
+    return original
 
 
 def _maybe_unwrap_optimized(model: object) -> "pl.LightningModule":
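
`to_uncompiled` now restores the original methods on the wrapped module instead of on the `OptimizedModule` wrapper; the relationship between the two objects that it relies on looks roughly like this (sketch assuming torch>=2.0, with a plain module standing in for a LightningModule):

    # Rough illustration: torch.compile() returns an OptimizedModule whose
    # _orig_mod attribute is the original module, so method restoration has to
    # target _orig_mod rather than the wrapper.
    import torch
    from torch._dynamo import OptimizedModule

    model = torch.nn.Linear(4, 4)  # stand-in for a LightningModule
    compiled = torch.compile(model)

    assert isinstance(compiled, OptimizedModule)
    assert compiled._orig_mod is model
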
diff --git a/src/lightning/pytorch/utilities/imports.py b/src/lightning/pytorch/utilities/imports.py
index 159b0b7758..d872e27153 100644
--- a/src/lightning/pytorch/utilities/imports.py
+++ b/src/lightning/pytorch/utilities/imports.py
@@ -19,6 +19,7 @@ from lightning_utilities.core.imports import RequirementCache, package_available
 from lightning_utilities.core.rank_zero import rank_zero_warn
 
 _PYTHON_GREATER_EQUAL_3_11_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 11)
+_TORCHMETRICS_GREATER_EQUAL_0_8_0 = RequirementCache("torchmetrics>=0.8.0")
 _TORCHMETRICS_GREATER_EQUAL_0_9_1 = RequirementCache("torchmetrics>=0.9.1")
 _TORCHMETRICS_GREATER_EQUAL_0_11 = RequirementCache("torchmetrics>=0.11.0")  # using new API with task
 _TORCHMETRICS_GREATER_EQUAL_1_0_0 = RequirementCache("torchmetrics>=1.0.0")
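
`RequirementCache` from `lightning-utilities` resolves the requirement string against the installed distribution and is truthy only when it is satisfied, which is what gates the legacy torchmetrics patching in `_graveyard/_torchmetrics.py`; a minimal sketch of the same check:

    # Illustrative only: the cache is checked in boolean context, exactly like the
    # new _TORCHMETRICS_GREATER_EQUAL_0_8_0 flag added above.
    from lightning_utilities.core.imports import RequirementCache

    _TORCHMETRICS_GREATER_EQUAL_0_8_0 = RequirementCache("torchmetrics>=0.8.0")

    if not _TORCHMETRICS_GREATER_EQUAL_0_8_0:
        print("torchmetrics older than 0.8.0 detected; applying the legacy patch")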