# MIT License

# Copyright (c) 2024 The SGLang Team

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from typing import Optional

from typer import Argument, Option
from typing_extensions import Annotated

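# HF credentials and the cache location are read from the environment;
# HF_HOME falls back to /scratch when unset.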
TOKEN = os.getenv("HF_TOKEN")
CACHE_DIR: str = os.getenv("HF_HOME", "/scratch")

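# Section titles used to group CLI options in typer's rich help panels.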
HELP_PANEL_NAME_1 = "Common Parameters"
HELP_PANEL_NAME_2 = "Logging Parameters"
HELP_PANEL_NAME_3 = "Debug Parameters"
HELP_PANEL_NAME_4 = "Modeling Parameters"


def sglang(
    # === general ===
    model_args: Annotated[
        str,
        Argument(
            help="Model arguments in the form key1=value1,key2=value2,... or path to a yaml config file (see examples/model_configs/transformers_model.yaml)"
        ),
    ],
    tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")],
    # === Common parameters ===
    use_chat_template: Annotated[
        bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
    ] = False,
    system_prompt: Annotated[
        Optional[str], Option(help="System prompt to use for evaluation.", rich_help_panel=HELP_PANEL_NAME_4)
    ] = None,
    dataset_loading_processes: Annotated[
        int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1)
    ] = 1,
    custom_tasks: Annotated[
        Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1)
    ] = None,
    cache_dir: Annotated[
        str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1)
    ] = CACHE_DIR,
    num_fewshot_seeds: Annotated[
        int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1)
    ] = 1,
    load_responses_from_details_date_id: Annotated[
        Optional[str],
        Option(help="Date id of a details directory to load responses from.", rich_help_panel=HELP_PANEL_NAME_1),
    ] = None,
    # === saving ===
    output_dir: Annotated[
        str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANEL_NAME_2)
    ] = "results",
    push_to_hub: Annotated[
        bool, Option(help="Push results to the Hugging Face Hub.", rich_help_panel=HELP_PANEL_NAME_2)
    ] = False,
    push_to_tensorboard: Annotated[
        bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANEL_NAME_2)
    ] = False,
    public_run: Annotated[
        bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANEL_NAME_2)
    ] = False,
    results_org: Annotated[
        Optional[str], Option(help="Organization to push results to.", rich_help_panel=HELP_PANEL_NAME_2)
    ] = None,
    save_details: Annotated[
        bool, Option(help="Save detailed, per-sample results.", rich_help_panel=HELP_PANEL_NAME_2)
    ] = False,
    # === debug ===
    max_samples: Annotated[
        Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3)
    ] = None,
    job_id: Annotated[
        int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3)
    ] = 0,
):
| 96 | + """ |
| 97 | + Evaluate models using vllm as backend. |
| 98 | + """ |
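    # Example invocation, assuming this function is registered as the `sglang`
    # command on lighteval's typer app (hypothetical model and task names;
    # lighteval tasks follow the "suite|task|num_fewshot|truncate_fewshot" format):
    #
    #   lighteval sglang \
    #       "model_name=meta-llama/Llama-3.1-8B-Instruct,tp_size=1" \
    #       "leaderboard|arc:challenge|0|0"
    #
    # The lighteval imports below are deferred so that merely importing this
    # module (e.g. to render --help) stays cheap.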
    import yaml

    from lighteval.logging.evaluation_tracker import EvaluationTracker
    from lighteval.models.model_input import GenerationParameters
    from lighteval.models.sglang.sglang_model import SGLangModelConfig
    from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters

    env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir)

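    # The tracker owns result persistence: local output, the optional Hub and
    # tensorboard pushes, and whether the results repo is public.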
    evaluation_tracker = EvaluationTracker(
        output_dir=output_dir,
        save_details=save_details,
        push_to_hub=push_to_hub,
        push_to_tensorboard=push_to_tensorboard,
        public=public_run,
        hub_results_org=results_org,
    )

    pipeline_params = PipelineParameters(
        launcher_type=ParallelismManager.SGLANG,
        env_config=env_config,
        job_id=job_id,
        dataset_loading_processes=dataset_loading_processes,
        custom_tasks_directory=custom_tasks,
        override_batch_size=-1,  # -1: no override, batching is left to the backend
        num_fewshot_seeds=num_fewshot_seeds,
        max_samples=max_samples,
        use_chat_template=use_chat_template,
        system_prompt=system_prompt,
        load_responses_from_details_date_id=load_responses_from_details_date_id,
    )

    if model_args.endswith(".yaml"):
        with open(model_args, "r") as f:
            config = yaml.safe_load(f)["model"]
        model_args = config["base_params"]["model_args"]
        generation_parameters = GenerationParameters.from_dict(config)
    else:
        generation_parameters = GenerationParameters()

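    # For reference, the yaml branch above expects a file shaped roughly like
    # the following (a sketch; the exact generation keys depend on what
    # GenerationParameters.from_dict accepts):
    #
    #   model:
    #     base_params:
    #       model_args: "model_name=meta-llama/Llama-3.1-8B-Instruct,tp_size=1"
    #     generation:
    #       temperature: 0.0

    # Comma-separated key=value pairs become a dict, and bare keys become True:
    # "model_name=org/model,tp_size=2,trust_remote_code" ->
    # {"model_name": "org/model", "tp_size": "2", "trust_remote_code": True}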
    model_args_dict: dict = {
        k.split("=", 1)[0]: k.split("=", 1)[1] if "=" in k else True for k in model_args.split(",")
    }
    model_config = SGLangModelConfig(**model_args_dict, generation_parameters=generation_parameters)

    pipeline = Pipeline(
        tasks=tasks,
        pipeline_parameters=pipeline_params,
        evaluation_tracker=evaluation_tracker,
        model_config=model_config,
    )

    pipeline.evaluate()

    pipeline.show_results()

    results = pipeline.get_results()

    pipeline.save_and_push_results()

    return results
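
# A minimal sketch of exposing this command on a typer app (hypothetical
# wiring; lighteval's real CLI registers its backends in its own entry point):
#
#   import typer
#
#   app = typer.Typer()
#   app.command()(sglang)
#
#   if __name__ == "__main__":
#       app()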