Skip to content

Commit dcc1f5a

Browse files
authored
skip MOE python test when MPI is not installed (microsoft#24116)
### Description It is not common that dev machines have MPI installed. Skip the test if MPI is not installed. ### Motivation and Context Make it easy to run pytest on a dev machine without the need to skip the test manually.
1 parent 9e53afa commit dcc1f5a

File tree

1 file changed

+18
-9
lines changed

1 file changed

+18
-9
lines changed

onnxruntime/test/python/transformers/sharded_moe/test_sharded_moe.py

+18-9
Original file line numberDiff line numberDiff line change
@@ -6,22 +6,28 @@
66
import unittest
77

88
import numpy as np
9-
from mpi4py import MPI
109
from onnx import TensorProto, helper
1110

1211
import onnxruntime
1312

14-
np.random.seed(3)
13+
try:
14+
from mpi4py import MPI
15+
16+
comm = MPI.COMM_WORLD
17+
except (ImportError, RuntimeError):
18+
comm = None
1519

16-
comm = MPI.COMM_WORLD
20+
has_mpi = comm is not None
21+
22+
np.random.seed(3)
1723

1824

1925
def get_rank():
20-
return comm.Get_rank()
26+
return comm.Get_rank() if comm else 0
2127

2228

2329
def get_size():
24-
return comm.Get_size()
30+
return comm.Get_size() if comm else 0
2531

2632

2733
def print_out(*args):
@@ -254,7 +260,7 @@ def run_ort_with_parity_check(
254260
)
255261

256262

257-
def test_moe_with_tensor_parallelism(
263+
def run_moe_with_tensor_parallelism(
258264
hidden_size,
259265
inter_size,
260266
num_experts,
@@ -327,7 +333,7 @@ def get_fc2_tensor_shards(expert_weights):
327333
)
328334

329335

330-
def test_moe_with_expert_parallelism(
336+
def run_moe_with_expert_parallelism(
331337
hidden_size,
332338
inter_size,
333339
num_experts,
@@ -390,19 +396,22 @@ def test_moe_with_expert_parallelism(
390396

391397
class TestMoE(unittest.TestCase):
392398
def test_moe_parallelism(self):
399+
if not has_mpi:
400+
self.skipTest("No MPI support")
401+
393402
for hidden_size in [128, 1024]:
394403
for inter_size in [512, 2048]:
395404
for num_experts in [64]:
396405
for num_rows in [1024]:
397406
print_out("EP")
398-
test_moe_with_expert_parallelism(
407+
run_moe_with_expert_parallelism(
399408
hidden_size,
400409
inter_size,
401410
num_experts,
402411
num_rows,
403412
)
404413
print_out("TP")
405-
test_moe_with_tensor_parallelism(
414+
run_moe_with_tensor_parallelism(
406415
hidden_size,
407416
inter_size,
408417
num_experts,

0 commit comments

Comments
 (0)