Bump torch from 2.0.1 to 2.1.0 in /requirements (#18752)
Co-authored-by: Jirka <jirka.borovec@seznam.cz>
Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
parent c5a731c3cd
commit 73f5df0a0a
@@ -49,4 +49,4 @@ jobs:
 pip list

 - name: Check typing
-run: mypy --no-warn-unused-ignores
+run: mypy
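Dropping the `--no-warn-unused-ignores` flag means stale suppressions are reported again once torch 2.1's improved stubs make them unnecessary (assuming `warn_unused_ignores` is enabled in the project's mypy configuration). A small, hypothetical illustration of the kind of comment this catches — not code from the Lightning repository:

# example.py -- hypothetical snippet for illustration only
import math

radius: float = 2.0
area = math.pi * radius ** 2  # type: ignore[operator]  # nothing to suppress here any more

Running `mypy --warn-unused-ignores example.py` (or plain `mypy` with the option set in config) reports 'Unused "type: ignore" comment' for that line.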
@@ -127,17 +127,10 @@ files = [
 ]
 # This section is for folders with "-" as they are not valid python modules
 exclude = [
-    "src/lightning_app/__about__.py",
-    "src/lightning_app/__setup__.py",
-    "src/lightning_app/__version__.py",
-    "src/lightning_fabric/__about__.py",
-    "src/lightning_fabric/__setup__.py",
-    "src/lightning_fabric/__version__.py",
     "src/lightning/app/cli/app-template",
     "src/lightning/app/cli/component-template",
     "src/lightning/app/cli/pl-app-template",
     "src/lightning/app/cli/react-ui-template",
-    "src/lightning/app/launcher",
 ]
 install_types = "True"
 non_interactive = "True"
@@ -192,6 +185,9 @@ module = [
     "lightning.app.frontend.stream_lit",
     "lightning.app.frontend.utils",
     "lightning.app.frontend.web",
+    "lightning.app.launcher.launcher",
+    "lightning.app.launcher.lightning_backend",
+    "lightning.app.launcher.lightning_hybrid_backend",
     "lightning.app.pdb.pdb",
     "lightning.app.runners.backends.backend",
     "lightning.app.runners.backends.cloud",
@@ -240,9 +236,6 @@ module = [
     "lightning.app.utilities.state",
     "lightning.app.utilities.tracer",
     "lightning.app.utilities.tree",
-    "lightning.store.authentication",
-    "lightning.store.cloud_api",
-    "lightning.store.save",
     "lightning.store.utils",
 ]
 ignore_errors = "True"
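The three pyproject.toml hunks above only reshuffle which modules mypy scans: entries dropped from `exclude` are now checked, while modules listed under the `[[tool.mypy.overrides]]` block with `ignore_errors = "True"` still have their errors silenced. As a rough illustration (a hypothetical snippet, not the project's tooling), mypy can be driven through its Python API to see the kind of report that such an override suppresses:

# Hypothetical illustration: invoking mypy programmatically on a deliberately ill-typed program.
from mypy import api

stdout, _, exit_status = api.run(["-c", "import math\nmath.sqrt('4')\n"])
print(exit_status, stdout.strip().splitlines()[0])
# exit_status 1 means an error was found; listing a module with ignore_errors = "True"
# in pyproject.toml suppresses exactly this kind of report for that module.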
@@ -5,4 +5,4 @@ lightning-utilities >=0.8.0, <0.10.0
 # to be able to include also 0.6 and preserve `>` needed for CI min version bypass
 torchdata >0.5.9, <=0.7.0
 # to be able to include also PL 2.0 and preserve `>` needed for CI min version bypass
-torch >0.14.0, <=2.1.0
+torch >0.14.0, <2.2.0
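The widened `<2.2.0` cap follows the note at the top of these requirement files: the upper bound exists only for CI stability and is dropped when the package is installed, while the `>` lower bound survives. A small sketch of how such caps can be stripped, using the third-party `packaging` library (an assumption for illustration, not the project's actual script):

from packaging.requirements import Requirement

def drop_upper_bounds(line: str) -> str:
    """Keep every specifier except '<' / '<=' caps, e.g. for a relaxed CI install."""
    req = Requirement(line)
    kept = [str(s) for s in req.specifier if not s.operator.startswith("<")]
    return f"{req.name} {', '.join(kept)}".strip()

print(drop_upper_bounds("torch >0.14.0, <2.2.0"))  # -> "torch >0.14.0"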
@@ -1,6 +1,6 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment

-torchvision >=0.13.0, <0.16.0
+torchvision >=0.13.0, <0.17.0
 torchmetrics >=0.10.0, <1.3.0
 lightning-utilities >=0.8.0, <0.10.0
@@ -1,7 +1,7 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment

-torchvision >=0.13.0, <0.16.0
+torchvision >=0.13.0, <0.17.0
 gym[classic_control] >=0.17.0, <0.27.0
 ipython[all] <8.15.0
 torchmetrics >=0.10.0, <1.3.0
@@ -1,5 +1,5 @@
 mypy==1.5.1
-torch==2.0.1
+torch==2.1.0

 types-Markdown
 types-PyYAML
@@ -196,9 +196,9 @@ if _is_starsessions_available():
 @fastapi_service.get("/api/v1/state", response_class=JSONResponse)
 async def get_state(
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Mapping:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
@@ -246,8 +246,8 @@ async def get_layout() -> str:
 @fastapi_service.get("/api/v1/spec", response_class=JSONResponse)
 async def get_spec(
     response: Response,
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Union[List, Dict]:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
@@ -266,9 +266,9 @@ async def get_spec(
 async def post_delta(
     request: Request,
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Optional[Dict]:
     """This endpoint is used to make an update to the app state using delta diff, mainly used by streamlit to update
     the state."""
@@ -292,9 +292,9 @@ async def post_delta(
 async def post_state(
     request: Request,
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Optional[Dict]:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
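The four handler hunks above drop `# type: ignore[assignment]` on the `Header(None)` defaults, which newer stubs accept directly for `Optional[str]` parameters. A self-contained sketch of the same pattern (a toy endpoint for illustration, not Lightning's API server):

from typing import Optional

from fastapi import FastAPI, Header

app = FastAPI()

@app.get("/api/v1/demo-state")  # hypothetical route, for illustration only
async def demo_state(x_lightning_session_uuid: Optional[str] = Header(None)) -> dict:
    # FastAPI fills the parameter from the X-Lightning-Session-UUID request header.
    if x_lightning_session_uuid is None:
        raise Exception("Missing X-Lightning-Session-UUID header")
    return {"session": x_lightning_session_uuid}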
@@ -84,10 +84,10 @@ class TorchCollective(Collective):
         return output_tensor_list

     def send(self, tensor: Tensor, dst: int, tag: int = 0) -> None:
-        dist.send(tensor, dst, tag=tag, group=self.group)  # type: ignore[arg-type]
+        dist.send(tensor, dst, tag=tag, group=self.group)

     def recv(self, tensor: Tensor, src: Optional[int] = None, tag: int = 0) -> Tensor:
-        dist.recv(tensor, src, tag=tag, group=self.group)  # type: ignore[arg-type]
+        dist.recv(tensor, src, tag=tag, group=self.group)
         return tensor

     def all_gather_object(self, object_list: List[Any], obj: Any) -> List[Any]:
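The wrapper methods above forward straight to `torch.distributed`; the `arg-type` ignores were only needed because older stubs typed the `group` argument too narrowly. For context, a standalone two-process sketch of the same send/recv calls (gloo backend, CPU only; an assumption-laden illustration, not Lightning's collective class):

import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def worker(rank: int, world_size: int) -> None:
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29501"  # arbitrary free port, adjust if taken
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    tensor = torch.zeros(1)
    if rank == 0:
        tensor += 42
        dist.send(tensor, dst=1, tag=0)   # same call the wrapper's send() makes
    else:
        dist.recv(tensor, src=0, tag=0)   # same call the wrapper's recv() makes
        assert tensor.item() == 42
    dist.destroy_process_group()

if __name__ == "__main__":
    mp.spawn(worker, args=(2,), nprocs=2)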
@@ -143,7 +143,7 @@ class FSDPPrecision(Precision):
         if scaler is not None:
             if _optimizer_handles_unscaling(optimizer):
                 raise NotImplementedError("Gradient clipping is not implemented for optimizers handling the unscaling.")
-            scaler.unscale_(optimizer)  # type: ignore[arg-type]  # ShardedGradScaler has wrong type annotation
+            scaler.unscale_(optimizer)

     def state_dict(self) -> Dict[str, Any]:
         if self.scaler is not None:
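The `scaler.unscale_` call above no longer needs an ignore because `ShardedGradScaler`'s annotation was fixed upstream. For context, a generic unscale-before-clip sketch with the stock `GradScaler` (disabled so it runs on CPU; an illustration of the pattern, not the FSDP code path):

import torch
from torch.cuda.amp import GradScaler

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = GradScaler(enabled=False)  # enabled=False keeps this runnable without a GPU

loss = model(torch.randn(2, 4)).sum()
scaler.scale(loss).backward()
scaler.unscale_(optimizer)  # gradients must be unscaled before clipping
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
scaler.step(optimizer)
scaler.update()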
@@ -277,7 +277,7 @@ class XLAFSDPStrategy(ParallelStrategy, _Sharded):
     ) -> Tensor:
         """Clip gradients by norm."""
         self.precision.unscale_gradients(optimizer)
-        return module.clip_grad_norm_(max_norm=max_norm, norm_type=norm_type)  # type: ignore[operator]
+        return module.clip_grad_norm_(max_norm=max_norm, norm_type=norm_type)

     def clip_gradients_value(self, module: Module, optimizer: Optimizer, clip_val: Union[float, int]) -> None:
         """Clip gradients by value."""
@@ -12,12 +12,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from pathlib import Path
-from typing import Any, Callable, Dict, Iterator, List, Optional, Protocol, TypeVar, Union, runtime_checkable
+from typing import (
+    Any,
+    Callable,
+    DefaultDict,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    Protocol,
+    TypeVar,
+    Union,
+    runtime_checkable,
+)

 import torch
 from torch import Tensor
 from torch.optim import Optimizer
-from typing_extensions import TypeAlias
+from typing_extensions import TypeAlias, overload

 from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_13, _TORCH_GREATER_EQUAL_2_0
@@ -117,7 +129,14 @@ class ReduceLROnPlateau(_Stateful[str], Protocol):
 class Steppable(Protocol):
     """To structurally type ``optimizer.step()``"""

+    # Inferred from `torch.optim.optimizer.pyi`
+    @overload
+    def step(self, closure: None = ...) -> None:
+        ...
+
+    @overload
+    def step(self, closure: Callable[[], float]) -> float:
+        ...
+
     def step(self, closure: Optional[Callable[[], float]] = ...) -> Optional[float]:
         ...
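The overloads added to `Steppable` mirror torch 2.1's `optimizer.pyi`, so call sites that pass a closure see `float` instead of `Optional[float]`. A minimal, self-contained sketch of that narrowing (an illustrative stand-in class, not the Lightning Protocol itself):

from typing import Callable, Optional, Protocol, overload, runtime_checkable

@runtime_checkable
class _Steppable(Protocol):  # hypothetical stand-in for illustration
    @overload
    def step(self, closure: None = ...) -> None: ...
    @overload
    def step(self, closure: Callable[[], float]) -> float: ...
    def step(self, closure: Optional[Callable[[], float]] = ...) -> Optional[float]: ...

def run_step(opt: _Steppable) -> float:
    # mypy resolves this call to the Callable overload, so the result is float, not Optional[float]
    return opt.step(lambda: 0.5)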
@@ -128,7 +147,7 @@ class Optimizable(Steppable, Protocol):

     param_groups: List[Dict[Any, Any]]
     defaults: Dict[Any, Any]
-    state: Dict[Any, Any]
+    state: DefaultDict[Tensor, Any]

     def state_dict(self) -> Dict[str, Dict[Any, Any]]:
         ...
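The new `state` annotation matches what torch optimizers actually hold: a `defaultdict` keyed by the parameter tensors. A quick standalone check (plain torch, nothing Lightning-specific):

import torch
from collections import defaultdict

p = torch.nn.Parameter(torch.ones(1))
opt = torch.optim.SGD([p], lr=0.1, momentum=0.9)
p.grad = torch.ones(1)
opt.step()

assert isinstance(opt.state, defaultdict)          # state is a defaultdict ...
assert p in opt.state                               # ... keyed by the parameter tensor
assert "momentum_buffer" in opt.state[p]            # per-parameter state lives in the value dict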
@@ -1,9 +1,9 @@
-import contextlib
 from typing import Callable

 import torchmetrics
 from lightning_utilities.core.imports import compare_version as _compare_version

+from lightning.pytorch.utilities.imports import _TORCHMETRICS_GREATER_EQUAL_0_8_0
 from lightning.pytorch.utilities.migration.utils import _patch_pl_to_mirror_if_necessary
@@ -12,13 +12,9 @@ def compare_version(package: str, op: Callable, version: str, use_base_version:
     return _compare_version(new_package, op, version, use_base_version)


-# patching is necessary, since up to v.0.7.3 torchmetrics has a hardcoded reference to lightning.pytorch,
-# which has to be redirected to the unified package:
-# https://github.com/Lightning-AI/metrics/blob/v0.7.3/torchmetrics/metric.py#L96
-with contextlib.suppress(AttributeError):
-    if hasattr(torchmetrics.utilities.imports, "_compare_version"):
-        torchmetrics.utilities.imports._compare_version = compare_version
-
-with contextlib.suppress(AttributeError):
-    if hasattr(torchmetrics.metric, "_compare_version"):
-        torchmetrics.metric._compare_version = compare_version
+if not _TORCHMETRICS_GREATER_EQUAL_0_8_0:
+    # up to v0.8.0 torchmetrics had a hardcoded reference to lightning.pytorch which has to be redirected to the
+    # unified package. this was removed in
+    # https://github.com/Lightning-AI/torchmetrics/commit/b225889b34b83272117b758cbc28772a5c2356d9
+    torchmetrics.utilities.imports._compare_version = compare_version
+    torchmetrics.metric._compare_version = compare_version
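The patch above replaces attribute probing under `contextlib.suppress` with an explicit version gate. `compare_version` from `lightning_utilities` takes a distribution name, a comparison operator and a version string, exactly as the redirected call shows; a short usage sketch (assumes torchmetrics is installed):

import operator

from lightning_utilities.core.imports import compare_version

# Same condition the _TORCHMETRICS_GREATER_EQUAL_0_8_0 flag captures.
needs_legacy_patch = not compare_version("torchmetrics", operator.ge, "0.8.0")
print("apply legacy lightning.pytorch redirect:", needs_legacy_patch)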
@@ -127,7 +127,7 @@ class BaseFinetuning(Callback):

         if isinstance(modules, Iterable):
             _flatten_modules = []
-            for m in modules:
+            for m in modules:  # type: ignore[union-attr]
                 _flatten_modules.extend(BaseFinetuning.flatten_modules(m))

             _modules = iter(_flatten_modules)
@@ -13,7 +13,7 @@
 # limitations under the License.
 from contextlib import contextmanager
 from dataclasses import fields
-from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union, overload
 from weakref import proxy

 import torch
@@ -393,9 +393,17 @@ class _MockOptimizer(Optimizer):
     def state_dict(self) -> Dict[str, Any]:
         return {}  # Return Empty

-    def step(self, closure: Optional[Callable] = None) -> None:
+    @overload
+    def step(self, closure: None = ...) -> None:
+        ...
+
+    @overload
+    def step(self, closure: Callable[[], float]) -> float:
+        ...
+
+    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
         if closure is not None:
-            closure()
+            return closure()

     def zero_grad(self, set_to_none: Optional[bool] = True) -> None:
         pass  # Do Nothing
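Besides the overloads, the body change above makes the closure's loss propagate (`return closure()`) instead of being discarded. A toy stand-in showing the visible difference (a hypothetical class, not `_MockOptimizer` itself):

from typing import Callable, Optional, overload

class _NoOpOptimizer:  # hypothetical stand-in for illustration
    @overload
    def step(self, closure: None = ...) -> None: ...
    @overload
    def step(self, closure: Callable[[], float]) -> float: ...

    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        if closure is not None:
            return closure()  # the closure's loss now reaches the caller
        return None

assert _NoOpOptimizer().step(lambda: 1.23) == 1.23
assert _NoOpOptimizer().step() is None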
@@ -75,10 +75,10 @@ class PositionalEncoding(nn.Module):
         self.register_parameter("pe", nn.Parameter(pe, requires_grad=False))

     def reset_parameters(self) -> None:
-        self.pe.copy_(self._init_pos_encoding())  # type: ignore[operator]
+        self.pe.copy_(self._init_pos_encoding())

     def forward(self, x: Tensor) -> Tensor:
-        x + self.pe[: x.size(0), :]  # type: ignore[index]
+        x + self.pe[: x.size(0), :]
         return self.dropout(x)

     def _init_pos_encoding(self) -> Tensor:
@@ -163,9 +163,7 @@ def _register_ddp_comm_hook(
 def _sync_module_states(module: torch.nn.Module) -> None:
     """Taken from https://github.com/pytorch/pytorch/blob/v2.0.0/torch/nn/parallel/distributed.py#L675-L682."""
     parameters_to_ignore = (
-        set(module._ddp_params_and_buffers_to_ignore)  # type: ignore[arg-type]
-        if hasattr(module, "_ddp_params_and_buffers_to_ignore")
-        else set()
+        set(module._ddp_params_and_buffers_to_ignore) if hasattr(module, "_ddp_params_and_buffers_to_ignore") else set()
     )
     from torch.distributed.distributed_c10d import _get_default_group
     from torch.distributed.utils import _sync_module_states as torch_sync_module_states
@@ -88,8 +88,8 @@ def to_uncompiled(model: Union["pl.LightningModule", "torch._dynamo.OptimizedMod
     from torch._dynamo import OptimizedModule

     if isinstance(model, OptimizedModule):
-        model = model._orig_mod
-        if not isinstance(model, pl.LightningModule):
+        original = model._orig_mod
+        if not isinstance(original, pl.LightningModule):
             raise TypeError(
                 f"Unexpected error, the wrapped model should be a LightningModule, found {type(model).__name__}"
             )
@@ -99,20 +99,21 @@
             raise ValueError(
                 "`model` is required to be a compiled LightningModule. Found a non-compiled LightningModule instead."
             )
+        original = model

     else:
         raise ValueError("`model` must either be an instance of OptimizedModule or LightningModule")

-    ctx = model._compiler_ctx
+    ctx = original._compiler_ctx
     if ctx is not None:
-        model.forward = ctx["original_forward"]  # type: ignore[method-assign]
-        model.training_step = ctx["original_training_step"]  # type: ignore[method-assign]
-        model.validation_step = ctx["original_validation_step"]  # type: ignore[method-assign]
-        model.test_step = ctx["original_test_step"]  # type: ignore[method-assign]
-        model.predict_step = ctx["original_predict_step"]  # type: ignore[method-assign]
-        model._compiler_ctx = None
+        original.forward = ctx["original_forward"]  # type: ignore[method-assign]
+        original.training_step = ctx["original_training_step"]  # type: ignore[method-assign]
+        original.validation_step = ctx["original_validation_step"]  # type: ignore[method-assign]
+        original.test_step = ctx["original_test_step"]  # type: ignore[method-assign]
+        original.predict_step = ctx["original_predict_step"]  # type: ignore[method-assign]
+        original._compiler_ctx = None

-    return model
+    return original


 def _maybe_unwrap_optimized(model: object) -> "pl.LightningModule":
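`to_uncompiled` now unwraps into a separate `original` variable so the `OptimizedModule` argument itself is left untouched. The underlying torch behaviour it relies on (torch >= 2.0) can be checked standalone; this is a plain-torch sketch, not Lightning code:

import torch
from torch._dynamo import OptimizedModule

net = torch.nn.Linear(2, 2)
compiled = torch.compile(net)

# torch.compile wraps the module and keeps the original reachable via ._orig_mod
assert isinstance(compiled, OptimizedModule)
assert compiled._orig_mod is net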
@@ -19,6 +19,7 @@ from lightning_utilities.core.imports import RequirementCache, package_available
 from lightning_utilities.core.rank_zero import rank_zero_warn

 _PYTHON_GREATER_EQUAL_3_11_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 11)
+_TORCHMETRICS_GREATER_EQUAL_0_8_0 = RequirementCache("torchmetrics>=0.8.0")
 _TORCHMETRICS_GREATER_EQUAL_0_9_1 = RequirementCache("torchmetrics>=0.9.1")
 _TORCHMETRICS_GREATER_EQUAL_0_11 = RequirementCache("torchmetrics>=0.11.0")  # using new API with task
 _TORCHMETRICS_GREATER_EQUAL_1_0_0 = RequirementCache("torchmetrics>=1.0.0")
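`_TORCHMETRICS_GREATER_EQUAL_0_8_0` joins the existing flags built on `RequirementCache`, which evaluates a requirement string against the installed environment and is truthy when it is satisfied. A short usage sketch:

from lightning_utilities.core.imports import RequirementCache

_TORCHMETRICS_GREATER_EQUAL_0_8_0 = RequirementCache("torchmetrics>=0.8.0")

if _TORCHMETRICS_GREATER_EQUAL_0_8_0:
    print("torchmetrics >= 0.8.0 is installed")
else:
    print("older or missing torchmetrics; the legacy patch path above would apply")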