Skip to content

Commit 36ba877

Browse files
committed
Downgrade from 3.13 to 3.12
1 parent 83ccca4 commit 36ba877

File tree

3 files changed

+22
-23
lines changed

3 files changed

+22
-23
lines changed

multimodal/vl2l/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ on how to install Miniconda on your host machine. Then, you can create a new con
2323
environment via:
2424

2525
```bash
26-
conda create -n mlperf-inf-mm-vl2l python=3.13
26+
conda create -n mlperf-inf-mm-vl2l python=3.12
2727
```
2828

2929
### Install LoadGen

multimodal/vl2l/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ classifiers = [
88
"Intended Audience :: Developers",
99
"Operating System :: POSIX :: Linux",
1010
]
11-
requires-python = ">=3.13"
11+
requires-python = ">=3.12"
1212
dependencies = [
1313
"datasets",
1414
"loguru",

multimodal/vl2l/src/mlperf_inference_multimodal_vl2l/cli.py

Lines changed: 20 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -28,13 +28,6 @@ class TestScenario(StrEnum):
2828
OFFLINE = auto()
2929
"""Run the benchmark in offline/batch scenario."""
3030

31-
class UnknownValueError(ValueError):
32-
"""The exception raised when an unknown test scenario is encountered."""
33-
34-
def __init__(self, test_scenario: TestScenario) -> None:
35-
"""Initialize the exception."""
36-
super().__init__(f"Unknown test scenario: {test_scenario}")
37-
3831
def to_lgtype(self) -> lg.TestScenario:
3932
"""Convert the test scenario to its corresponding LoadGen type."""
4033
match self:
@@ -43,7 +36,15 @@ def to_lgtype(self) -> lg.TestScenario:
4336
case TestScenario.OFFLINE:
4437
return lg.TestScenario.Offline
4538
case _:
46-
raise TestScenario.UnknownValueError(self)
39+
raise UnknownTestScenarioValueError(self)
40+
41+
42+
class UnknownTestScenarioValueError(ValueError):
43+
"""The exception raised when an unknown test scenario is encountered."""
44+
45+
def __init__(self, test_scenario: TestScenario) -> None:
46+
"""Initialize the exception."""
47+
super().__init__(f"Unknown test scenario: {test_scenario}")
4748

4849

4950
class TestMode(StrEnum):
@@ -55,13 +56,6 @@ class TestMode(StrEnum):
5556
ACCURACY_ONLY = auto()
5657
"""Run the benchmark to evaluate model quality."""
5758

58-
class UnknownValueError(ValueError):
59-
"""The exception raised when an unknown test mode is encountered."""
60-
61-
def __init__(self, test_mode: TestMode) -> None:
62-
"""Initialize the exception."""
63-
super().__init__(f"Unknown test mode: {test_mode}")
64-
6559
def to_lgtype(self) -> lg.TestMode:
6660
"""Convert the test mode to its corresponding LoadGen type."""
6761
match self:
@@ -70,7 +64,15 @@ def to_lgtype(self) -> lg.TestMode:
7064
case TestMode.ACCURACY_ONLY:
7165
return lg.TestMode.AccuracyOnly
7266
case _:
73-
raise TestMode.UnknownValueError(self)
67+
raise UnknownTestModeValueError(self)
68+
69+
70+
class UnknownTestModeValueError(ValueError):
71+
"""The exception raised when an unknown test mode is encountered."""
72+
73+
def __init__(self, test_mode: TestMode) -> None:
74+
"""Initialize the exception."""
75+
super().__init__(f"Unknown test mode: {test_mode}")
7476

7577

7678
class TestSettings(BaseModel):
@@ -114,8 +116,7 @@ def to_lgtype(self) -> lg.TestSettings:
114116
settings.scenario = self.senario.to_lgtype()
115117
settings.mode = self.mode.to_lgtype()
116118
settings.offline_expected_qps = self.offline_expected_qps
117-
settings.min_duration_ms = round(
118-
self.min_duration.total_seconds() * 1000)
119+
settings.min_duration_ms = round(self.min_duration.total_seconds() * 1000)
119120
settings.use_token_latencies = True
120121
return settings
121122

@@ -200,9 +201,7 @@ def main(
200201
logger.info("Running VL2L benchmark with settings: {}", settings)
201202
logger.info("Running VL2L benchmark with model: {}", model)
202203
logger.info("Running VL2L benchmark with dataset: {}", dataset)
203-
logger.info(
204-
"Running VL2L benchmark with OpenAI API endpoint: {}",
205-
endpoint)
204+
logger.info("Running VL2L benchmark with OpenAI API endpoint: {}", endpoint)
206205
logger.info("Running VL2L benchmark with random seed: {}", random_seed)
207206
lg_settings = settings.to_lgtype()
208207
task = ShopifyGlobalCatalogue(

0 commit comments

Comments (0)