From 4ecb0d8bc997bfe2e6f9f64055cfcbdb8c1376d0 Mon Sep 17 00:00:00 2001
From: theory-in-progress <81622504+theory-in-progress@users.noreply.github.com>
Date: Mon, 11 Oct 2021 16:53:21 +0530
Subject: [PATCH] Updated quantization imports in PyTorch 1.10 (#9878)

Co-authored-by: tchaton
Co-authored-by: Rohit Gupta
---
 pytorch_lightning/callbacks/quantization.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/callbacks/quantization.py b/pytorch_lightning/callbacks/quantization.py
index da85b100ca..6e805b79a9 100644
--- a/pytorch_lightning/callbacks/quantization.py
+++ b/pytorch_lightning/callbacks/quantization.py
@@ -20,13 +20,17 @@ import functools
 from typing import Any, Callable, Optional, Sequence, Union
 
 import torch
-from torch.quantization import QConfig
 
 import pytorch_lightning as pl
 from pytorch_lightning.callbacks.base import Callback
 from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 
+if _TORCH_GREATER_EQUAL_1_10:
+    from torch.ao.quantization import QConfig
+else:
+    from torch.quantization import QConfig
+
 
 def wrap_qat_forward_context(
     quant_cb, model: "pl.LightningModule", func: Callable, trigger_condition: Optional[Union[Callable, int]] = None
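
The diff guards the QConfig import on the installed torch version because PyTorch 1.10 moved the eager-mode quantization APIs into the torch.ao.quantization namespace, leaving torch.quantization as a deprecated alias. Below is a minimal sketch (not part of the patch) of how the guarded import can be exercised; the call to torch.quantization.get_default_qconfig is only an illustrative assumption about downstream usage, not something the patch itself adds.

    import torch
    from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10

    # Same guard as the patch: prefer the torch.ao namespace on 1.10+,
    # fall back to the legacy location on older releases.
    if _TORCH_GREATER_EQUAL_1_10:
        from torch.ao.quantization import QConfig
    else:
        from torch.quantization import QConfig

    # Illustrative usage (assumption, not from the patch): the default
    # qconfig shipped with torch is an instance of the imported QConfig
    # class regardless of which branch supplied it.
    qconfig = torch.quantization.get_default_qconfig("fbgemm")
    assert isinstance(qconfig, QConfig)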