MAINT: Upgrade ruff to v0.6.4 (#3095)
* MNT Upgrade ruff to 0.6.4

The currently pinned version, 0.2.1, is quite old at this point.

Not much needed to change:

- Bump the ruff version pin in setup.py
- Remove the deprecated `ignore-init-module-imports` option from the ruff config
- Use `is` instead of `==` for type comparisons (see the sketch after this list)
- Use f-strings instead of `%` formatting
- Adjust some line wrapping and blank lines
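
For illustration, a minimal sketch (hypothetical names, not code from this commit) of the two lint fixes involved:

```python
import os


class Config:
    pass


cfg = Config()

# Type comparison: ruff's E721 flags `==`/`!=` against a type; `is` compares
# class identity directly and reads unambiguously.
if type(cfg) == Config:  # old style, flagged by E721
    pass
if type(cfg) is Config:  # preferred
    pass

# String formatting: printf-style `%` interpolation (flagged by the pyupgrade
# rule UP031) becomes an f-string, which `ruff check --fix` can rewrite.
old = "%s\\nvidia-smi.exe" % os.environ.get("SYSTEMDRIVE", "C:")
new = f"{os.environ.get('SYSTEMDRIVE', 'C:')}\\nvidia-smi.exe"
```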

* Oops
BenjaminBossan authored Sep 10, 2024
1 parent ed9a574 commit 3fd02e6
Showing 12 changed files with 20 additions and 15 deletions.
1 change: 1 addition & 0 deletions benchmarks/fp8/ddp.py
@@ -17,6 +17,7 @@
 This particular script verifies this for DDP training.
 """
+
 import evaluate
 import torch
 import transformer_engine.common.recipe as te_recipe
1 change: 1 addition & 0 deletions benchmarks/fp8/distrib_deepspeed.py
@@ -17,6 +17,7 @@
 This particular script verifies this for DDP training.
 """
+
 from unittest.mock import patch
 
 import deepspeed
1 change: 1 addition & 0 deletions benchmarks/fp8/fsdp.py
@@ -17,6 +17,7 @@
 This particular script verifies this for FSDP training.
 """
+
 from functools import partial
 
 import evaluate
1 change: 1 addition & 0 deletions benchmarks/fp8/non_distributed.py
@@ -17,6 +17,7 @@
 This particular script verifies this for single GPU training.
 """
+
 import evaluate
 import torch
 import transformer_engine.common.recipe as te_recipe
1 change: 1 addition & 0 deletions examples/config_yaml_templates/run_me.py
@@ -15,6 +15,7 @@
 """
 A base script which outputs the accelerate config for the given environment
 """
+
 from accelerate import Accelerator
 
 
1 change: 0 additions & 1 deletion pyproject.toml
@@ -4,7 +4,6 @@ target-version = "py38"
 
 [tool.ruff.lint]
 preview = true
-ignore-init-module-imports = true
 extend-select = [
     "B009", # static getattr
     "B010", # static setattr
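
For context, not part of this diff: as I recall, `ignore-init-module-imports` stopped ruff's autofix from deleting seemingly unused imports in `__init__.py` files. The usual replacement is to mark re-exports explicitly so F401 does not flag them in the first place; a hypothetical `__init__.py` sketch:

```python
# __init__.py of a hypothetical package (not from this repository)

# A redundant alias marks the import as an intentional re-export for F401:
from .core import Engine as Engine

# Alternatively, listing the name in __all__ declares it part of the public API:
from .utils import helper

__all__ = ["Engine", "helper"]
```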
2 changes: 1 addition & 1 deletion setup.py
@@ -19,7 +19,7 @@
 extras["quality"] = [
     "black ~= 23.1", # hf-doc-builder has a hidden dependency on `black`
     "hf-doc-builder >= 0.3.0",
-    "ruff ~= 0.2.1",
+    "ruff ~= 0.6.4",
 ]
 extras["docs"] = []
 extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"]
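
A side note on the pin itself: `~=` is a "compatible release" specifier, so `ruff ~= 0.6.4` allows any 0.6.x release at or above 0.6.4 but excludes 0.7.0. A quick sketch with the `packaging` library (illustrative, not part of this diff):

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.6.4")  # equivalent to >=0.6.4, ==0.6.*
print("0.6.9" in spec)  # True: newer patch in the same minor series
print("0.7.0" in spec)  # False: the next minor series is excluded
```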
16 changes: 8 additions & 8 deletions src/accelerate/utils/dataclasses.py
@@ -1281,14 +1281,14 @@ class FullyShardedDataParallelPlugin:
             "If passing in a `dict`, it should have the following keys: `param_dtype`, `reduce_dtype`, and `buffer_dtype`."
         },
     )
-    auto_wrap_policy: Optional[
-        Union[Callable, Literal["transformer_based_wrap", "size_based_wrap", "no_wrap"]]
-    ] = field(
-        default=None,
-        metadata={
-            "help": "A callable or string specifying a policy to recursively wrap layers with FSDP. If a string, it must be one of `transformer_based_wrap`, `size_based_wrap`, or `no_wrap`. "
-            "Defaults to `NO_WRAP`. See `torch.distributed.fsdp.wrap.size_based_wrap_policy` for a direction on what it should look like"
-        },
+    auto_wrap_policy: Optional[Union[Callable, Literal["transformer_based_wrap", "size_based_wrap", "no_wrap"]]] = (
+        field(
+            default=None,
+            metadata={
+                "help": "A callable or string specifying a policy to recursively wrap layers with FSDP. If a string, it must be one of `transformer_based_wrap`, `size_based_wrap`, or `no_wrap`. "
+                "Defaults to `NO_WRAP`. See `torch.distributed.fsdp.wrap.size_based_wrap_policy` for a direction on what it should look like"
+            },
+        )
     )
     cpu_offload: Union[bool, "torch.distributed.fsdp.CPUOffload"] = field(
         default=None,
2 changes: 1 addition & 1 deletion src/accelerate/utils/environment.py
@@ -106,7 +106,7 @@ def _nvidia_smi():
         # try from systemd drive with default installation path
         command = which("nvidia-smi")
         if command is None:
-            command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"]
+            command = f"{os.environ['systemdrive']}\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe"
     else:
         command = "nvidia-smi"
     return command
2 changes: 1 addition & 1 deletion src/accelerate/utils/fsdp_utils.py
@@ -130,7 +130,7 @@ def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0, a
         model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
     ):
         if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
-            if type(model) != FSDP and accelerator.process_index != 0:
+            if type(model) is not FSDP and accelerator.process_index != 0:
                 if not fsdp_plugin.sync_module_states:
                     raise ValueError(
                         "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
1 change: 1 addition & 0 deletions src/accelerate/utils/memory.py
@@ -16,6 +16,7 @@
 A collection of utilities for ensuring that training can always occur. Heavily influenced by the
 [toma](https://github.com/BlackHC/toma) library.
 """
+
 import functools
 import gc
 import importlib
6 changes: 3 additions & 3 deletions tests/deepspeed/test_deepspeed.py
@@ -783,9 +783,9 @@ def test_autofill_dsconfig_from_ds_plugin(self, dtype):
 
     def test_ds_config_assertions(self):
         ambiguous_env = self.dist_env.copy()
-        ambiguous_env[
-            "ACCELERATE_CONFIG_DS_FIELDS"
-        ] = "gradient_accumulation_steps,gradient_clipping,zero_stage,offload_optimizer_device,offload_param_device,zero3_save_16bit_model,mixed_precision"
+        ambiguous_env["ACCELERATE_CONFIG_DS_FIELDS"] = (
+            "gradient_accumulation_steps,gradient_clipping,zero_stage,offload_optimizer_device,offload_param_device,zero3_save_16bit_model,mixed_precision"
+        )
 
         with mockenv_context(**ambiguous_env):
             with self.assertRaises(ValueError) as cm:
