set backend to hccl only if hccl initialization sets a local rank
IlyasMoutawwakil committed Feb 5, 2025
1 parent 7f72745 commit f66c5df
Showing 1 changed file with 4 additions and 10 deletions.
14 changes: 4 additions & 10 deletions src/accelerate/state.py
@@ -730,18 +730,12 @@ def _prepare_backend(
             backend = "xla"
             distributed_type = DistributedType.XLA
         elif is_hpu_available():
-            from habana_frameworks.torch.distributed.hccl import initialize_distributed_hpu  # noqa: F401
+            import habana_frameworks.torch.distributed.hccl
 
-            print("after importing habana_frameworks in prepare_backend")
-
-            print(os.environ.get("LOCAL_RANK", -1))
-            print(os.environ.get("WORLD_SIZE", -1))
-            print(os.environ.get("RANK", -1))
-            print(os.environ.get("MASTER_ADDR", -1))
-            print(os.environ.get("MASTER_PORT", -1))
+            if int(os.environ.get("LOCAL_RANK", -1)) != -1:
+                backend = "hccl"
+                distributed_type = DistributedType.MULTI_HPU
 
-            backend = "hccl"
-            distributed_type = DistributedType.MULTI_HPU
         elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu:
             if is_mlu_available():
                 backend = "cncl"
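For context, a minimal standalone sketch of the selection logic this commit introduces: the HCCL backend is only picked when a launcher (torchrun, accelerate launch, ...) has exported LOCAL_RANK, so a single-process run on an HPU machine stays non-distributed instead of being forced onto "hccl"/MULTI_HPU as before. The helper name choose_hpu_backend and its string return values are hypothetical, not part of accelerate's API; the real code lives inside PartialState._prepare_backend and returns a DistributedType.

import os


def choose_hpu_backend():
    """Hypothetical helper mirroring the post-commit gating logic."""
    # In the real code, importing habana_frameworks.torch.distributed.hccl first
    # makes the "hccl" backend available to torch.distributed; skipped here so
    # the sketch runs without Habana software installed.
    if int(os.environ.get("LOCAL_RANK", -1)) != -1:
        # A launcher exported LOCAL_RANK, so this is a multi-process run: use HCCL.
        return "hccl", "MULTI_HPU"
    # No LOCAL_RANK: single-process run, leave the backend unset.
    return None, "NO_DISTRIBUTED_TRAINING"


if __name__ == "__main__":
    print(choose_hpu_backend())   # (None, 'NO_DISTRIBUTED_TRAINING') when LOCAL_RANK is unset
    os.environ["LOCAL_RANK"] = "0"
    print(choose_hpu_backend())   # ('hccl', 'MULTI_HPU')

This brings the HPU branch in line with the other accelerators, which already gate their multi-device backends on the LOCAL_RANK check.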
