
Commit 1c83154

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 4ac2dd4 commit 1c83154

File tree

12 files changed: +19 -17 lines

Diff for: src/lightning/fabric/accelerators/cpu.py

+1-1
@@ -49,7 +49,7 @@ def get_parallel_devices(devices: Union[int, str, List[int]]) -> List[torch.device]:
         """Gets parallel devices for the Accelerator."""
         devices = _parse_cpu_cores(devices)
         return [torch.device("cpu")] * devices
-
+
     @staticmethod
     @override
     def get_device() -> str:

(The removed and added lines differ only in invisible whitespace: the hook strips trailing spaces from the otherwise-blank line. The cuda.py, mps.py, xla.py, and test diffs below apply the same fix.)

Diff for: src/lightning/fabric/accelerators/cuda.py

+1-1
@@ -59,7 +59,7 @@ def get_parallel_devices(devices: List[int]) -> List[torch.device]:
     @override
     def get_device() -> str:
         return "cuda"
-
+
     @staticmethod
     @override
     def auto_device_count() -> int:

Diff for: src/lightning/fabric/accelerators/mps.py

+1-1
@@ -64,7 +64,7 @@ def get_parallel_devices(devices: Union[int, str, List[int]]) -> List[torch.device]:
     @override
     def get_device() -> str:
         return "mps"
-
+
     @staticmethod
     @override
     def auto_device_count() -> int:

Diff for: src/lightning/fabric/accelerators/xla.py

+1-1
@@ -68,7 +68,7 @@ def get_parallel_devices(devices: Union[int, List[int]]) -> List[torch.device]:
     @override
     def get_device() -> str:
         return "xla"
-
+
     @staticmethod
     @override
     # XLA's multiprocessing will pop the TPU_NUM_DEVICES key, so we need to cache it
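The four accelerator diffs above all touch the same API: each accelerator's static get_device() reports its device type as a plain string. A quick illustrative check, assuming the classes are imported from lightning.fabric.accelerators as usual (the printed values are read off the return statements in the diffs):

from lightning.fabric.accelerators import CPUAccelerator, CUDAAccelerator, MPSAccelerator, XLAAccelerator

# Each accelerator reports its device type as a string; per the diffs above
# this prints cpu, cuda, mps, and xla respectively.
for acc in (CPUAccelerator, CUDAAccelerator, MPSAccelerator, XLAAccelerator):
    print(acc.__name__, "->", acc.get_device())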

Diff for: src/lightning/fabric/connector.py

+2-2
@@ -464,9 +464,9 @@ def _check_and_init_precision(self) -> Precision:
             return DeepSpeedPrecision(self._precision_input)  # type: ignore
         if isinstance(self.strategy, FSDPStrategy):
             return FSDPPrecision(
-                precision=self._precision_input, # type: ignore[arg-type]
+                precision=self._precision_input,  # type: ignore[arg-type]
                 device=self._accelerator_flag.get_device() if isinstance(self._accelerator_flag, Accelerator) else None,
-                )
+            )
         mp_precision_supported = ("32-true", "bf16-mixed", "bf16-true", "16-true")
         if isinstance(self.strategy, ModelParallelStrategy) and self._precision_input not in mp_precision_supported:
             raise ValueError(
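This hunk is where the accelerator's get_device() string flows into the precision plugin. A minimal standalone sketch of the equivalent call, assuming the device parameter this commit's branch adds to FSDPPrecision; "16-mixed" and "cuda" are illustrative stand-ins for self._precision_input and the accelerator's get_device() result:

from lightning.fabric.plugins.precision import FSDPPrecision

# Illustrative equivalent of the reformatted call above, with literal
# stand-in values instead of connector state.
precision = FSDPPrecision(precision="16-mixed", device="cuda")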

Diff for: src/lightning/fabric/plugins/precision/amp.py

+1-2
@@ -55,8 +55,7 @@ def __init__(
             if _TORCH_GREATER_EQUAL_2_4
             else getattr(
                 torch,
-                "cuda" if not isinstance(device, str) or device.split(":")[0] == "cpu"
-                else device.split(":")[0]
+                "cuda" if not isinstance(device, str) or device.split(":")[0] == "cpu" else device.split(":")[0],
             ).amp.GradScaler()
         )
         if scaler is not None and self.precision == "bf16-mixed":
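On torch < 2.4, the reformatted conditional picks which torch submodule's GradScaler to construct. A sketch of that selection logic pulled out into a standalone helper (the helper name is hypothetical; the real code inlines the expression as the second argument to getattr(torch, ...)):

from typing import Union

import torch

def _scaler_backend(device: Union[str, torch.device]) -> str:
    # Non-string devices and "cpu" fall back to the CUDA scaler; otherwise
    # the device-type prefix is used, e.g. "mps:0" -> "mps".
    if not isinstance(device, str) or device.split(":")[0] == "cpu":
        return "cuda"
    return device.split(":")[0]

assert _scaler_backend("cpu") == "cuda"
assert _scaler_backend("mps:0") == "mps"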

Diff for: src/lightning/fabric/strategies/deepspeed.py

+4-2
@@ -619,8 +619,10 @@ def _initialize_engine(
     @override
     def setup_environment(self) -> None:
         from deepspeed.runtime.utils import get_accelerator
-        if (not isinstance(self.accelerator, CUDAAccelerator)) and \
-                self.accelerator.get_device() != get_accelerator().device_name():  # type: ignore[union-attr]
+
+        if (
+            not isinstance(self.accelerator, CUDAAccelerator)
+        ) and self.accelerator.get_device() != get_accelerator().device_name():  # type: ignore[union-attr]
             raise RuntimeError(
                 f"The DeepSpeed strategy is only supported on {get_accelerator().device_name()} GPUs,"
                 f"but `{self.accelerator.__class__.__name__}` is used."

Diff for: src/lightning/pytorch/plugins/precision/amp.py

+1-2
@@ -55,8 +55,7 @@ def __init__(
             if _TORCH_GREATER_EQUAL_2_4
             else getattr(
                 torch,
-                "cuda" if not isinstance(device, str) or device.split(":")[0] == "cpu"
-                else device.split(":")[0]
+                "cuda" if not isinstance(device, str) or device.split(":")[0] == "cpu" else device.split(":")[0],
             ).amp.GradScaler()
         )
         if scaler is not None and self.precision == "bf16-mixed":

Diff for: src/lightning/pytorch/strategies/deepspeed.py

+4-2
@@ -317,8 +317,10 @@ def __init__(
     @override
     def setup_environment(self) -> None:
         from deepspeed.runtime.utils import get_accelerator
-        if (not isinstance(self.accelerator, CUDAAccelerator)) and \
-                self.accelerator.get_device() != get_accelerator().device_name():  # type: ignore[union-attr]
+
+        if (
+            not isinstance(self.accelerator, CUDAAccelerator)
+        ) and self.accelerator.get_device() != get_accelerator().device_name():  # type: ignore[union-attr]
             raise RuntimeError(
                 f"The DeepSpeed strategy is only supported on {get_accelerator().device_name()} GPUs,"
                 f"but `{self.accelerator.__class__.__name__}` is used."

Diff for: src/lightning/pytorch/trainer/connectors/accelerator_connector.py

+1-1
@@ -508,7 +508,7 @@ def _check_and_init_precision(self) -> Precision:
             return DeepSpeedPrecision(self._precision_flag)  # type: ignore[arg-type]
         if isinstance(self.strategy, FSDPStrategy):
             return FSDPPrecision(
-                precision=self._precision_flag, # type: ignore[arg-type]
+                precision=self._precision_flag,  # type: ignore[arg-type]
                 device=self._accelerator_flag.get_device() if isinstance(self._accelerator_flag, Accelerator) else None,
             )
         if self._precision_flag in ("16-true", "bf16-true"):

Diff for: tests/tests_fabric/test_connector.py

+1-1
@@ -178,7 +178,7 @@ def parse_devices(devices):
     @staticmethod
     def get_parallel_devices(devices):
         return [torch.device("cpu")] * devices
-
+
     @staticmethod
     def get_device() -> str:
         return "cpu"

Diff for: tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py

+1-1
@@ -191,7 +191,7 @@ def parse_devices(devices):
     @staticmethod
     def get_parallel_devices(devices):
         return [torch.device("cpu")] * devices
-
+
     @staticmethod
     def get_device() -> str:
         return "cpu"
