diff --git a/.circleci/config.yml b/.circleci/config.yml index 8758310be9..48797fab85 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -37,10 +37,7 @@ references: run: name: Make Documentation command: | - # First run the same pipeline as Read-The-Docs - # apt-get update && apt-get install -y cmake - # using: https://hub.docker.com/r/readthedocs/build - # we need to use py3.7 ot higher becase of an issue with metaclass inheritence + # the image uses Python 2.7 by default, force a different version pyenv global 3.7.3 python --version pip install -r requirements/docs.txt diff --git a/.github/workflows/ci_pkg-install.yml b/.github/workflows/ci_pkg-install.yml index bf7de876d1..0be8b37188 100644 --- a/.github/workflows/ci_pkg-install.yml +++ b/.github/workflows/ci_pkg-install.yml @@ -16,7 +16,7 @@ jobs: # max-parallel: 6 matrix: os: [ubuntu-20.04, macOS-10.15, windows-2019] - python-version: [3.6, 3.9] + python-version: ["3.7", "3.9"] steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/ci_test-full.yml b/.github/workflows/ci_test-full.yml index ad600623df..2a17c07b05 100644 --- a/.github/workflows/ci_test-full.yml +++ b/.github/workflows/ci_test-full.yml @@ -21,11 +21,7 @@ jobs: python-version: ["3.7", "3.9"] # minimum, maximum requires: ["oldest", "latest"] release: ["stable"] - include: - # test 3.6 only on oldest until EOL: https://github.com/PyTorchLightning/pytorch-lightning/issues/9981 - - {os: ubuntu-18.04, python-version: "3.6", requires: "oldest", release: "stable"} - - {os: windows-2019, python-version: "3.6", requires: "oldest", release: "stable"} - - {os: macOS-10.15, python-version: "3.6", requires: "oldest", release: "stable"} + #include: # nightly: add when there's a release candidate #- {os: ubuntu-20.04, python-version: "3.10", requires: "latest", release: "pre"} diff --git a/.github/workflows/release-docker.yml b/.github/workflows/release-docker.yml index 169e01edd8..39e2598086 100644 --- a/.github/workflows/release-docker.yml
+++ b/.github/workflows/release-docker.yml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - python_version: ["3.6", "3.7", "3.8", "3.9"] + python_version: ["3.7", "3.8", "3.9"] pytorch_version: ["1.7", "1.8", "1.9", "1.10"] steps: - name: Checkout diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 950bf004ea..68bd201eb0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -49,7 +49,7 @@ repos: rev: v2.29.0 hooks: - id: pyupgrade - args: [--py36-plus] + args: [--py37-plus] name: Upgrade code - repo: https://github.com/myint/docformatter @@ -80,8 +80,8 @@ repos: rev: v1.11.0 hooks: - id: blacken-docs - args: [ --line-length=120 ] - additional_dependencies: [ black==21.7b0 ] + args: [--line-length=120] + additional_dependencies: [black==21.7b0] - repo: https://github.com/executablebooks/mdformat rev: 0.7.10 diff --git a/CHANGELOG.md b/CHANGELOG.md index d874577847..07df818fb3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -308,6 +308,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Removed `Accelerator.on_train_start` ([#10999](https://github.com/PyTorchLightning/pytorch-lightning/pull/10999)) + +- Removed support for Python 3.6 ([#11117](https://github.com/PyTorchLightning/pytorch-lightning/pull/11117)) + ### Fixed - Fixed `NeptuneLogger` when using DDP ([#11030](https://github.com/PyTorchLightning/pytorch-lightning/pull/11030)) diff --git a/README.md b/README.md index 9d618955e1..3cdf711489 100644 --- a/README.md +++ b/README.md @@ -86,9 +86,6 @@ Lightning is rigorously tested across multiple GPUs, TPUs CPUs and against major | Linux py3.{7,9} | [![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | - | - | [![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | | OSX py3.{7,9} | [![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | - | - | [![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | | Windows py3.{7,9} | [![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | - | - | 
[![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | -| Linux py3.6 | [![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | - | - | - | -| OSX py3.6 | [![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | - | - | - | -| Windows py3.6 | [![Test](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml/badge.svg?branch=master&event=push)](https://github.com/PyTorchLightning/pytorch-lightning/actions/workflows/ci_test-full.yml) | - | - | - | - _\*\* tests run on two NVIDIA P100_ - _\*\*\* tests run on Google GKE TPUv2/3. TPU py3.7 means we support Colab and Kaggle env._ diff --git a/dockers/base-xla/Dockerfile b/dockers/base-xla/Dockerfile index e293343614..714d3199bb 100644 --- a/dockers/base-xla/Dockerfile +++ b/dockers/base-xla/Dockerfile @@ -16,7 +16,7 @@ FROM google/cloud-sdk:slim LABEL maintainer="PyTorchLightning " -# CALL: docker image build -t pytorch-lightning:XLA-extras-py3.6 -f dockers/base-xla/Dockerfile . --build-arg PYTHON_VERSION=3.8 +# CALL: docker image build -t pytorch-lightning:XLA-image -f dockers/base-xla/Dockerfile . 
--build-arg PYTHON_VERSION=3.8 ARG PYTHON_VERSION=3.9 ARG CONDA_VERSION=4.9.2 ARG XLA_VERSION=1.8 diff --git a/environment.yml b/environment.yml index 7e3c84e913..cc5273f465 100644 --- a/environment.yml +++ b/environment.yml @@ -26,7 +26,7 @@ channels: - conda-forge dependencies: - - python>=3.6 + - python>=3.7 - pip>20.1 - numpy>=1.17.2 - pytorch>=1.7.* diff --git a/pytorch_lightning/accelerators/gpu.py b/pytorch_lightning/accelerators/gpu.py index 06ade654fc..f3d8680c79 100644 --- a/pytorch_lightning/accelerators/gpu.py +++ b/pytorch_lightning/accelerators/gpu.py @@ -110,8 +110,7 @@ def get_nvidia_gpu_stats(device: torch.device) -> Dict[str, float]: result = subprocess.run( [nvidia_smi_path, f"--query-gpu={gpu_query}", "--format=csv,nounits,noheader", f"--id={gpu_id}"], encoding="utf-8", - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, # for backward compatibility with python version 3.6 + capture_output=True, check=True, ) diff --git a/pytorch_lightning/callbacks/gpu_stats_monitor.py b/pytorch_lightning/callbacks/gpu_stats_monitor.py index 98a8300017..6ca9d7712f 100644 --- a/pytorch_lightning/callbacks/gpu_stats_monitor.py +++ b/pytorch_lightning/callbacks/gpu_stats_monitor.py @@ -214,8 +214,7 @@ class GPUStatsMonitor(Callback): f"--id={gpu_ids}", ], encoding="utf-8", - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, # for backward compatibility with python version 3.6 + capture_output=True, check=True, ) diff --git a/pytorch_lightning/core/mixins/device_dtype_mixin.py b/pytorch_lightning/core/mixins/device_dtype_mixin.py index d902958b9b..ed9f1f7683 100644 --- a/pytorch_lightning/core/mixins/device_dtype_mixin.py +++ b/pytorch_lightning/core/mixins/device_dtype_mixin.py @@ -20,7 +20,7 @@ from torch.nn import Module try: from typing_extensions import Self except ImportError: - # workaround for Python 3.6 and 3.7. + # workaround for Python 3.7. 
# see https://www.python.org/dev/peps/pep-0673/ from typing import TypeVar diff --git a/pytorch_lightning/profiler/base.py b/pytorch_lightning/profiler/base.py index f5df2b5382..4e90d8adc3 100644 --- a/pytorch_lightning/profiler/base.py +++ b/pytorch_lightning/profiler/base.py @@ -129,9 +129,9 @@ class BaseProfiler(AbstractProfiler): def describe(self) -> None: """Logs a profile report after the conclusion of run.""" - # there are pickling issues with open file handles in Python 3.6 - # so to avoid them, we open and close the files within this function - # by calling `_prepare_streams` and `teardown` + # users might call `describe` directly as the profilers can be used by themselves. + # to allow this, we open and close the files within this function by calling `_prepare_streams` and `teardown` + # manually instead of letting the `Trainer` do it through `setup` and `teardown` self._prepare_streams() summary = self.summary() if summary: diff --git a/pytorch_lightning/trainer/connectors/logger_connector/result.py b/pytorch_lightning/trainer/connectors/logger_connector/result.py index 29e91f8f6a..09bd741a32 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/result.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/result.py @@ -14,7 +14,7 @@ from collections.abc import Generator from dataclasses import asdict, dataclass, replace from functools import partial, wraps -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union import torch from torchmetrics import Metric @@ -29,8 +29,7 @@ from pytorch_lightning.utilities.memory import recursive_detach from pytorch_lightning.utilities.metrics import metrics_to_scalars from pytorch_lightning.utilities.warnings import WarningCache -# TODO(@tchaton): Typing-pickle issue on python<3.7 (https://github.com/cloudpipe/cloudpickle/pull/318) -_IN_METRIC = Any # Union[Metric, torch.Tensor] # Do not include scalars as 
they were converted to tensors +_IN_METRIC = Union[Metric, torch.Tensor] # Do not include scalars as they were converted to tensors _OUT_METRIC = Union[torch.Tensor, Dict[str, torch.Tensor]] _PBAR_METRIC = Union[float, Dict[str, float]] _OUT_DICT = Dict[str, _OUT_METRIC] @@ -217,6 +216,7 @@ class _ResultMetric(Metric, DeviceDtypeModuleMixin): def update(self, value: _IN_METRIC, batch_size: int) -> None: # type: ignore[override] if self.is_tensor: + value = cast(torch.Tensor, value) if not torch.is_floating_point(value): dtype = torch.get_default_dtype() warning_cache.warn( @@ -244,6 +244,7 @@ class _ResultMetric(Metric, DeviceDtypeModuleMixin): elif self.meta.is_sum_reduction: self.value += value.mean() else: + value = cast(Metric, value) self.value = value self._forward_cache = value._forward_cache @@ -405,7 +406,9 @@ class _ResultCollection(dict): apply_to_collection(list(self.values()), _ResultMetric, append_fn) return o - def _extract_batch_size(self, value: _METRIC_COLLECTION, batch_size: Optional[int], meta: _Metadata) -> int: + def _extract_batch_size( + self, value: Union[_ResultMetric, _ResultMetricCollection], batch_size: Optional[int], meta: _Metadata + ) -> int: # check if we have extracted the batch size already if batch_size is None: batch_size = self.batch_size diff --git a/pytorch_lightning/utilities/imports.py b/pytorch_lightning/utilities/imports.py index 68b4dcf4c8..d4f615b2e7 100644 --- a/pytorch_lightning/utilities/imports.py +++ b/pytorch_lightning/utilities/imports.py @@ -35,11 +35,7 @@ def _module_available(module_path: str) -> bool: """ try: return find_spec(module_path) is not None - except AttributeError: - # Python 3.6 - return False except ModuleNotFoundError: - # Python 3.7+ return False diff --git a/pytorch_lightning/utilities/memory.py b/pytorch_lightning/utilities/memory.py index b80a2b94bd..488563ab3f 100644 --- a/pytorch_lightning/utilities/memory.py +++ b/pytorch_lightning/utilities/memory.py @@ -163,9 +163,7 @@ def 
get_gpu_memory_map() -> Dict[str, float]: result = subprocess.run( [nvidia_smi_path, "--query-gpu=memory.used", "--format=csv,nounits,noheader"], encoding="utf-8", - # capture_output=True, # valid for python version >=3.7 - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, # for backward compatibility with python version 3.6 + capture_output=True, check=True, ) diff --git a/pytorch_lightning/utilities/model_summary.py b/pytorch_lightning/utilities/model_summary.py index ccc81fc46d..ef80afb27f 100644 --- a/pytorch_lightning/utilities/model_summary.py +++ b/pytorch_lightning/utilities/model_summary.py @@ -14,7 +14,6 @@ import contextlib import logging -import sys from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union @@ -267,10 +266,8 @@ class ModelSummary: if trainer is not None: forward_context = trainer.precision_plugin.forward_context() - elif sys.version_info >= (3, 7): - forward_context = contextlib.nullcontext() else: - forward_context = contextlib.suppress() + forward_context = contextlib.nullcontext() with torch.no_grad(), forward_context: # let the model hooks collect the input- and output shapes diff --git a/requirements.txt b/requirements.txt index c708817bfe..2ae75bf5d8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,6 @@ numpy>=1.17.2 torch>=1.7.* -future>=0.17.1 # required for builtins in setup.py tqdm>=4.41.0 PyYAML>=5.4 fsspec[http]>=2021.05.0, !=2021.06.0 diff --git a/setup.py b/setup.py index 9d54a0d564..956b49a8b9 100755 --- a/setup.py +++ b/setup.py @@ -80,7 +80,7 @@ setup( long_description_content_type="text/markdown", zip_safe=False, keywords=["deep learning", "pytorch", "AI"], - python_requires=">=3.6", + python_requires=">=3.7", setup_requires=[], install_requires=setup_tools._load_requirements(_PATH_ROOT), extras_require=extras, @@ -92,9 +92,7 @@ setup( classifiers=[ "Environment :: Console", "Natural Language :: English", - # How mature is this project? 
Common values are - # 3 - Alpha, 4 - Beta, 5 - Production/Stable - "Development Status :: 4 - Beta", + "Development Status :: 5 - Production/Stable", # Indicate who your project is intended for "Intended Audience :: Developers", "Topic :: Scientific/Engineering :: Artificial Intelligence", @@ -103,10 +101,8 @@ setup( # Pick your license as you wish "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", - # Specify the Python versions you support here. In particular, ensure - # that you indicate whether you support Python 2, Python 3 or both. + # Specify the Python versions you support here. "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", diff --git a/tests/conftest.py b/tests/conftest.py index 4c85e12a66..e88ec1b3f6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,7 +13,6 @@ # limitations under the License. 
import os import signal -import sys import threading from functools import partial from http.server import SimpleHTTPRequestHandler @@ -135,28 +134,8 @@ def caplog(caplog): @pytest.fixture def tmpdir_server(tmpdir): - if sys.version_info >= (3, 7): - Handler = partial(SimpleHTTPRequestHandler, directory=str(tmpdir)) - from http.server import ThreadingHTTPServer - else: - # unfortunately SimpleHTTPRequestHandler doesn't accept the directory arg in python3.6 - # so we have to hack it like this - - class Handler(SimpleHTTPRequestHandler): - def translate_path(self, path): - # get the path from cwd - path = super().translate_path(path) - # get the relative path - relpath = os.path.relpath(path, os.getcwd()) - # return the full path from root_dir - return os.path.join(str(tmpdir), relpath) - - # ThreadingHTTPServer was added in 3.7, so we need to define it ourselves - from http.server import HTTPServer - from socketserver import ThreadingMixIn - - class ThreadingHTTPServer(ThreadingMixIn, HTTPServer): - daemon_threads = True + Handler = partial(SimpleHTTPRequestHandler, directory=str(tmpdir)) + from http.server import ThreadingHTTPServer with ThreadingHTTPServer(("localhost", 0), Handler) as server: server_thread = threading.Thread(target=server.serve_forever) diff --git a/tests/trainer/test_trainer_cli.py b/tests/trainer/test_trainer_cli.py index 3218464772..543050d8ef 100644 --- a/tests/trainer/test_trainer_cli.py +++ b/tests/trainer/test_trainer_cli.py @@ -147,7 +147,6 @@ def test_argparse_args_parsing(cli_args, expected): assert Trainer.from_argparse_args(args) -@RunIf(min_python="3.7.0") @pytest.mark.parametrize( "cli_args,expected", [("", False), ("--fast_dev_run=0", False), ("--fast_dev_run=True", True), ("--fast_dev_run 2", 2)], @@ -180,7 +179,6 @@ def test_argparse_args_parsing_gpus(cli_args, expected_parsed, expected_device_i assert trainer.data_parallel_device_ids == expected_device_ids -@RunIf(min_python="3.7.0") @pytest.mark.parametrize( ["cli_args", 
"extra_args"], [ diff --git a/tests/utilities/test_argparse.py b/tests/utilities/test_argparse.py index a41771ff0e..cc1ec10cb5 100644 --- a/tests/utilities/test_argparse.py +++ b/tests/utilities/test_argparse.py @@ -213,12 +213,6 @@ def test_add_argparse_args_no_argument_group(): assert args.my_parameter == 2 -def test_add_argparse_args_invalid(): - """Test that `add_argparse_args` doesn't raise `TypeError` when a class has args typed as `typing.Generic` in - Python 3.6.""" - add_argparse_args(AddArgparseArgsExampleClassGeneric, ArgumentParser()) - - def test_gpus_allowed_type(): assert _gpus_allowed_type("1,2") == "1,2" assert _gpus_allowed_type("1") == 1 diff --git a/tests/utilities/test_cli.py b/tests/utilities/test_cli.py index 7d1d436a00..849d754161 100644 --- a/tests/utilities/test_cli.py +++ b/tests/utilities/test_cli.py @@ -1226,7 +1226,6 @@ def test_optimizers_and_lr_schedulers_add_arguments_to_parser_implemented_reload assert cli.model.sch_config["init_args"]["anneal_strategy"] == "linear" -@RunIf(min_python="3.7.3") # bpo-17185: `autospec=True` and `inspect.signature` do not play well def test_lightning_cli_config_with_subcommand(): config = {"test": {"trainer": {"limit_test_batches": 1}, "verbose": True, "ckpt_path": "foobar"}} with mock.patch("sys.argv", ["any.py", f"--config={config}"]), mock.patch( @@ -1238,7 +1237,6 @@ def test_lightning_cli_config_with_subcommand(): assert cli.trainer.limit_test_batches == 1 -@RunIf(min_python="3.7.3") def test_lightning_cli_config_before_subcommand(): config = { "validate": {"trainer": {"limit_val_batches": 1}, "verbose": False, "ckpt_path": "barfoo"}, @@ -1262,7 +1260,6 @@ def test_lightning_cli_config_before_subcommand(): assert cli.trainer.limit_val_batches == 1 -@RunIf(min_python="3.7.3") def test_lightning_cli_config_before_subcommand_two_configs(): config1 = {"validate": {"trainer": {"limit_val_batches": 1}, "verbose": False, "ckpt_path": "barfoo"}} config2 = {"test": {"trainer": {"limit_test_batches": 1}, 
"verbose": True, "ckpt_path": "foobar"}} @@ -1284,7 +1281,6 @@ def test_lightning_cli_config_before_subcommand_two_configs(): assert cli.trainer.limit_val_batches == 1 -@RunIf(min_python="3.7.3") def test_lightning_cli_config_after_subcommand(): config = {"trainer": {"limit_test_batches": 1}, "verbose": True, "ckpt_path": "foobar"} with mock.patch("sys.argv", ["any.py", "test", f"--config={config}"]), mock.patch( @@ -1296,7 +1292,6 @@ def test_lightning_cli_config_after_subcommand(): assert cli.trainer.limit_test_batches == 1 -@RunIf(min_python="3.7.3") def test_lightning_cli_config_before_and_after_subcommand(): config1 = {"test": {"trainer": {"limit_test_batches": 1}, "verbose": True, "ckpt_path": "foobar"}} config2 = {"trainer": {"fast_dev_run": 1}, "verbose": False, "ckpt_path": "foobar"}