diff --git a/pytorch_lightning/plugins/training_type/parallel.py b/pytorch_lightning/plugins/training_type/parallel.py
index e6406ea444..4f4b2c5b8e 100644
--- a/pytorch_lightning/plugins/training_type/parallel.py
+++ b/pytorch_lightning/plugins/training_type/parallel.py
@@ -44,7 +44,7 @@ class ParallelPlugin(TrainingTypePlugin, ABC):
     @property
     @abstractmethod
     def root_device(self) -> torch.device:
-        raise NotImplementedError
+        """Return the root device."""
 
     @property
     def on_gpu(self) -> bool:
diff --git a/pytorch_lightning/plugins/training_type/training_type_plugin.py b/pytorch_lightning/plugins/training_type/training_type_plugin.py
index 585c8b6405..a1e55631c2 100644
--- a/pytorch_lightning/plugins/training_type/training_type_plugin.py
+++ b/pytorch_lightning/plugins/training_type/training_type_plugin.py
@@ -86,19 +86,16 @@ class TrainingTypePlugin(ABC):
     @abstractmethod
     def on_gpu(self) -> bool:
         """Returns whether the current process is done on GPU."""
-        raise NotImplementedError
 
     @property
     @abstractmethod
     def on_tpu(self) -> bool:
         """Returns whether the current process is done on TPU."""
-        raise NotImplementedError
 
     @property
     @abstractmethod
     def root_device(self) -> torch.device:
         """Returns the root device."""
-        raise NotImplementedError
 
     @abstractmethod
     def model_to_device(self) -> None:
@@ -321,7 +318,6 @@ class TrainingTypePlugin(ABC):
 
         It is the right place to release memory and free other resources.
         """
-        raise NotImplementedError
 
     @classmethod
     def register_plugins(cls, plugin_registry) -> None:
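
The diff above removes explicit `raise NotImplementedError` bodies from members that are already decorated with `@abstractmethod`. As a minimal sketch (the class names below are illustrative, not part of this PR), Python's abc machinery alone is enough to keep these members abstract: a subclass that does not override them cannot be instantiated, whether the abstract body raises or contains only a docstring.

from abc import ABC, abstractmethod

import torch


class DemoPlugin(ABC):
    # Hypothetical stand-in for ParallelPlugin / TrainingTypePlugin.
    @property
    @abstractmethod
    def root_device(self) -> torch.device:
        """Return the root device."""


class CPUPlugin(DemoPlugin):
    # Concrete subclass that overrides the abstract property.
    @property
    def root_device(self) -> torch.device:
        return torch.device("cpu")


class BrokenPlugin(DemoPlugin):
    # Does not override root_device, so it stays abstract.
    pass


print(CPUPlugin().root_device)  # prints: cpu
# BrokenPlugin()  # raises TypeError: can't instantiate abstract class BrokenPlugin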