
Commit 0057894

[Core] Rename PromptInputs and inputs (vllm-project#8673)
1 parent 0f961b3 commit 0057894

18 files changed, +157 -162 lines changed


benchmarks/benchmark_latency.py

Lines changed: 4 additions & 4 deletions
@@ -11,7 +11,7 @@
 
 from vllm import LLM, SamplingParams
 from vllm.engine.arg_utils import DEVICE_OPTIONS, EngineArgs
-from vllm.inputs import PromptInputs
+from vllm.inputs import PromptType
 from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
 from vllm.utils import FlexibleArgumentParser
 
@@ -61,7 +61,7 @@ def main(args: argparse.Namespace):
     dummy_prompt_token_ids = np.random.randint(10000,
                                                size=(args.batch_size,
                                                      args.input_len))
-    dummy_inputs: List[PromptInputs] = [{
+    dummy_prompts: List[PromptType] = [{
         "prompt_token_ids": batch
     } for batch in dummy_prompt_token_ids.tolist()]
 
@@ -74,13 +74,13 @@ def run_to_completion(profile_dir: Optional[str] = None):
                 ],
                 on_trace_ready=torch.profiler.tensorboard_trace_handler(
                     str(profile_dir))) as p:
-            llm.generate(dummy_inputs,
+            llm.generate(dummy_prompts,
                          sampling_params=sampling_params,
                          use_tqdm=False)
         print(p.key_averages())
     else:
         start_time = time.perf_counter()
-        llm.generate(dummy_inputs,
+        llm.generate(dummy_prompts,
                      sampling_params=sampling_params,
                      use_tqdm=False)
         end_time = time.perf_counter()
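
For reference, a minimal sketch of how the renamed alias reads in calling code after this change; the model name and token IDs below are illustrative, not taken from the benchmark:

from typing import List

from vllm import LLM, SamplingParams
from vllm.inputs import PromptType

# Token-ID prompts are dicts with a "prompt_token_ids" key, now annotated
# with the renamed PromptType alias (formerly PromptInputs).
prompts: List[PromptType] = [
    {"prompt_token_ids": [1, 2, 3, 4]},  # arbitrary example IDs
    {"prompt_token_ids": [5, 6, 7]},
]

llm = LLM(model="facebook/opt-125m")  # illustrative model choice
outputs = llm.generate(prompts, sampling_params=SamplingParams(max_tokens=16))
for output in outputs:
    print(output.outputs[0].text)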

docs/source/dev/multimodal/multimodal_index.rst

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ Multi-Modality
 vLLM provides experimental support for multi-modal models through the :mod:`vllm.multimodal` package.
 
 Multi-modal inputs can be passed alongside text and token prompts to :ref:`supported models <supported_vlms>`
-via the ``multi_modal_data`` field in :class:`vllm.inputs.PromptInputs`.
+via the ``multi_modal_data`` field in :class:`vllm.inputs.PromptType`.
 
 Currently, vLLM only has built-in support for image data. You can extend vLLM to process additional modalities
 by following :ref:`this guide <adding_multimodal_plugin>`.

docs/source/dev/offline_inference/llm_inputs.rst

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 LLM Inputs
 ==========
 
-.. autodata:: vllm.inputs.PromptInputs
+.. autodata:: vllm.inputs.PromptType
 
 .. autoclass:: vllm.inputs.TextPrompt
     :show-inheritance:

docs/source/models/vlm.rst

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ The :class:`~vllm.LLM` class can be instantiated in much the same way as languag
     We have removed all vision language related CLI args in the ``0.5.1`` release. **This is a breaking change**, so please update your code to follow
     the above snippet. Specifically, ``image_feature_size`` can no longer be specified as we now calculate that internally for each model.
 
-To pass an image to the model, note the following in :class:`vllm.inputs.PromptInputs`:
+To pass an image to the model, note the following in :class:`vllm.inputs.PromptType`:
 
 * ``prompt``: The prompt should follow the format that is documented on HuggingFace.
 * ``multi_modal_data``: This is a dictionary that follows the schema defined in :class:`vllm.multimodal.MultiModalDataDict`.
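
As a rough illustration of the renamed class in the documented multi-modal flow; the model, chat format, and image path below are assumptions for the sketch, not taken from the docs page:

from PIL import Image

from vllm import LLM, SamplingParams

llm = LLM(model="llava-hf/llava-1.5-7b-hf")  # illustrative vision-language model
image = Image.open("example.jpg")            # hypothetical local image

# A single PromptType dict combining the text prompt with multi_modal_data.
prompt = {
    "prompt": "USER: <image>\nWhat is shown in this image?\nASSISTANT:",
    "multi_modal_data": {"image": image},
}
outputs = llm.generate(prompt, sampling_params=SamplingParams(max_tokens=64))
print(outputs[0].outputs[0].text)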

tests/mq_llm_engine/test_error_handling.py

Lines changed: 6 additions & 6 deletions
@@ -61,15 +61,15 @@ async def test_evil_forward(tmp_socket):
 
         # Throws an error in first forward pass.
         with pytest.raises(RAISED_ERROR):
-            async for _ in client.generate(inputs="Hello my name is",
+            async for _ in client.generate(prompt="Hello my name is",
                                            sampling_params=SamplingParams(),
                                            request_id=uuid.uuid4()):
                 pass
         assert client.errored
 
         # Engine is errored, should get ENGINE_DEAD_ERROR.
         with pytest.raises(MQEngineDeadError):
-            async for _ in client.generate(inputs="Hello my name is",
+            async for _ in client.generate(prompt="Hello my name is",
                                            sampling_params=SamplingParams(),
                                            request_id=uuid.uuid4()):
                 pass
@@ -118,7 +118,7 @@ async def test_failed_health_check(tmp_socket):
 
         # Generate call should throw ENGINE_DEAD_ERROR
         with pytest.raises(MQEngineDeadError):
-            async for _ in client.generate(inputs="Hello my name is",
+            async for _ in client.generate(prompt="Hello my name is",
                                            sampling_params=SamplingParams(),
                                            request_id=uuid.uuid4()):
                 pass
@@ -165,7 +165,7 @@ async def bad_abort_after_2s():
         # with reference to the original KeyError("foo")
         with pytest.raises(MQEngineDeadError) as execinfo:
             async for _ in client.generate(
-                    inputs="Hello my name is",
+                    prompt="Hello my name is",
                     sampling_params=SamplingParams(max_tokens=2000),
                     request_id=uuid.uuid4()):
                 pass
@@ -190,7 +190,7 @@ async def test_bad_request(tmp_socket):
 
         # Invalid request should fail, but not crash the server.
         with pytest.raises(ValueError):
-            async for _ in client.generate(inputs="Hello my name is",
+            async for _ in client.generate(prompt="Hello my name is",
                                            sampling_params=SamplingParams(),
                                            request_id="abcd-1",
                                            lora_request=LoRARequest(
@@ -199,7 +199,7 @@ async def test_bad_request(tmp_socket):
                 pass
 
         # This request should be okay.
-        async for _ in client.generate(inputs="Hello my name is",
+        async for _ in client.generate(prompt="Hello my name is",
                                        sampling_params=SamplingParams(),
                                        request_id="abcd-2"):
             pass

tests/mq_llm_engine/utils.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ async def generate(
     count = 0
     async for out in client.generate(
             request_id=request_id,
-            inputs="Hello my name is Robert and",
+            prompt="Hello my name is Robert and",
            sampling_params=SamplingParams(max_tokens=num_tokens,
                                           temperature=0)):
 
vllm/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@
 from vllm.engine.llm_engine import LLMEngine
 from vllm.entrypoints.llm import LLM
 from vllm.executor.ray_utils import initialize_ray_cluster
-from vllm.inputs import PromptInputs, TextPrompt, TokensPrompt
+from vllm.inputs import PromptType, TextPrompt, TokensPrompt
 from vllm.model_executor.models import ModelRegistry
 from vllm.outputs import (CompletionOutput, EmbeddingOutput,
                           EmbeddingRequestOutput, RequestOutput)
@@ -19,7 +19,7 @@
     "__version__",
     "LLM",
     "ModelRegistry",
-    "PromptInputs",
+    "PromptType",
     "TextPrompt",
     "TokensPrompt",
     "SamplingParams",

vllm/engine/async_llm_engine.py

Lines changed: 11 additions & 13 deletions
@@ -17,7 +17,7 @@
 from vllm.executor.executor_base import ExecutorAsyncBase
 from vllm.executor.gpu_executor import GPUExecutorAsync
 from vllm.executor.ray_utils import initialize_ray_cluster
-from vllm.inputs import PromptInputs
+from vllm.inputs import PromptType
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
 from vllm.model_executor.layers.sampler import SamplerOutput
@@ -405,7 +405,7 @@ async def stop_remote_worker_execution_loop_async(self) -> None:
     async def add_request_async(
         self,
         request_id: str,
-        inputs: PromptInputs,
+        prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
         arrival_time: Optional[float] = None,
         lora_request: Optional[LoRARequest] = None,
@@ -420,7 +420,7 @@ async def add_request_async(
             arrival_time = time.time()
 
         preprocessed_inputs = await self.input_preprocessor.preprocess_async(
-            inputs,
+            prompt,
             request_id=request_id,
             lora_request=lora_request,
             prompt_adapter_request=prompt_adapter_request,
@@ -777,7 +777,7 @@ async def run_engine_loop(engine_ref: ReferenceType):
     async def add_request(
         self,
         request_id: str,
-        inputs: PromptInputs,
+        prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
         arrival_time: Optional[float] = None,
         lora_request: Optional[LoRARequest] = None,
@@ -797,7 +797,7 @@ async def add_request(
         stream = self._request_tracker.add_request(
             request_id,
             verbose=self.log_requests,
-            inputs=inputs,
+            prompt=prompt,
             params=params,
             arrival_time=arrival_time or time.time(),
             lora_request=lora_request,
@@ -808,7 +808,7 @@
 
     async def generate(
         self,
-        inputs: PromptInputs,
+        prompt: PromptType,
         sampling_params: SamplingParams,
         request_id: str,
         lora_request: Optional[LoRARequest] = None,
@@ -822,8 +822,7 @@ async def generate(
         from the LLMEngine to the caller.
 
         Args:
-            inputs: The inputs to the LLM. See
-                :class:`~vllm.inputs.PromptInputs`
+            prompt: The prompt to the LLM. See :class:`~vllm.inputs.PromptType`
                 for more details about the format of each input.
             sampling_params: The sampling parameters of the request.
             request_id: The unique id of the request.
@@ -881,7 +880,7 @@ async def generate(
         """
         async for output in await self.add_request(
                 request_id,
-                inputs,
+                prompt,
                 sampling_params,
                 lora_request=lora_request,
                 trace_headers=trace_headers,
@@ -891,7 +890,7 @@
 
     async def encode(
         self,
-        inputs: PromptInputs,
+        prompt: PromptType,
         pooling_params: PoolingParams,
         request_id: str,
         lora_request: Optional[LoRARequest] = None,
@@ -904,8 +903,7 @@ async def encode(
         from the LLMEngine to the caller.
 
         Args:
-            inputs: The inputs to the LLM. See
-                :class:`~vllm.inputs.PromptInputs`
+            prompt: The prompt to the LLM. See :class:`~vllm.inputs.PromptType`
                 for more details about the format of each input.
             pooling_params: The pooling parameters of the request.
             request_id: The unique id of the request.
@@ -959,7 +957,7 @@ async def encode(
         """
         async for output in await self.add_request(
                 request_id,
-                inputs,
+                prompt,
                 pooling_params,
                 lora_request=lora_request,
                 trace_headers=trace_headers,
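
A hedged sketch of a caller updated for the renamed keyword argument; the engine arguments, model, and request id below are illustrative assumptions:

import asyncio
import uuid

from vllm import SamplingParams
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine


async def main() -> None:
    engine = AsyncLLMEngine.from_engine_args(
        AsyncEngineArgs(model="facebook/opt-125m"))  # illustrative model

    # The request argument is now named `prompt` rather than `inputs`.
    final = None
    async for output in engine.generate(
            prompt="Hello, my name is",
            sampling_params=SamplingParams(max_tokens=16),
            request_id=str(uuid.uuid4())):
        final = output
    print(final.outputs[0].text)


asyncio.run(main())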

vllm/engine/llm_engine.py

Lines changed: 4 additions & 5 deletions
@@ -29,7 +29,7 @@
 from vllm.executor.gpu_executor import GPUExecutor
 from vllm.executor.ray_utils import initialize_ray_cluster
 from vllm.inputs import (INPUT_REGISTRY, EncoderDecoderLLMInputs,
-                         InputRegistry, LLMInputs, PromptInputs)
+                         InputRegistry, LLMInputs, PromptType)
 from vllm.inputs.preprocess import InputPreprocessor
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
@@ -680,7 +680,7 @@ def stop_remote_worker_execution_loop(self) -> None:
     def add_request(
         self,
         request_id: str,
-        inputs: PromptInputs,
+        prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
         arrival_time: Optional[float] = None,
         lora_request: Optional[LoRARequest] = None,
@@ -695,8 +695,7 @@ def add_request(
 
         Args:
             request_id: The unique ID of the request.
-            inputs: The inputs to the LLM. See
-                :class:`~vllm.inputs.PromptInputs`
+            prompt: The prompt to the LLM. See :class:`~vllm.inputs.PromptType`
                 for more details about the format of each input.
             params: Parameters for sampling or pooling.
                 :class:`~vllm.SamplingParams` for text generation.
@@ -736,7 +735,7 @@ def add_request(
             arrival_time = time.time()
 
         preprocessed_inputs = self.input_preprocessor.preprocess(
-            inputs,
+            prompt,
             request_id=request_id,
             lora_request=lora_request,
             prompt_adapter_request=prompt_adapter_request,
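
The synchronous engine's add_request follows the same rename; a small sketch under the assumption of a default single-GPU setup (model choice and request id are illustrative):

from vllm import EngineArgs, LLMEngine, SamplingParams

engine = LLMEngine.from_engine_args(EngineArgs(model="facebook/opt-125m"))

# The prompt parameter is now named `prompt` (previously `inputs`).
engine.add_request(request_id="0",
                   prompt="Hello, my name is",
                   params=SamplingParams(max_tokens=16))

while engine.has_unfinished_requests():
    for request_output in engine.step():
        if request_output.finished:
            print(request_output.outputs[0].text)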

vllm/engine/multiprocessing/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -3,7 +3,7 @@
 from typing import List, Mapping, Optional, Union
 
 from vllm import PoolingParams
-from vllm.inputs import PromptInputs
+from vllm.inputs import PromptType
 from vllm.lora.request import LoRARequest
 from vllm.outputs import RequestOutput
 from vllm.prompt_adapter.request import PromptAdapterRequest
@@ -23,7 +23,7 @@ class MQEngineDeadError(RuntimeError):
 
 @dataclass
 class RPCProcessRequest:
-    inputs: PromptInputs
+    prompt: PromptType
     params: Union[SamplingParams, PoolingParams]
     request_id: str
     lora_request: Optional[LoRARequest] = None
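
A minimal sketch of constructing the renamed dataclass field; the prompt, params, and request id values are illustrative, and the remaining optional fields keep their defaults:

from vllm import SamplingParams
from vllm.engine.multiprocessing import RPCProcessRequest

request = RPCProcessRequest(
    prompt="Hello my name is",            # renamed from `inputs`
    params=SamplingParams(max_tokens=8),
    request_id="abcd-1",
)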

vllm/engine/multiprocessing/client.py

Lines changed: 9 additions & 11 deletions
@@ -25,7 +25,7 @@
                                          RPCStartupResponse)
 # yapf: enable
 from vllm.envs import VLLM_RPC_TIMEOUT
-from vllm.inputs import PromptInputs
+from vllm.inputs import PromptType
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
 from vllm.outputs import EmbeddingRequestOutput, RequestOutput
@@ -375,7 +375,7 @@ def dead_error(self) -> BaseException:
 
     def generate(
         self,
-        inputs: PromptInputs,
+        prompt: PromptType,
         sampling_params: SamplingParams,
         request_id: str,
         lora_request: Optional[LoRARequest] = None,
@@ -389,8 +389,7 @@ def generate(
         from the LLMEngine to the caller.
 
         Args:
-            inputs: The inputs to the LLM. See
-                :class:`~vllm.inputs.PromptInputs`
+            prompt: The prompt to the LLM. See :class:`~vllm.inputs.PromptType`
                 for more details about the format of each input.
             sampling_params: The sampling parameters of the request.
             request_id: The unique id of the request.
@@ -399,13 +398,13 @@ def generate(
             prompt_adapter_request: Prompt Adapter request to use
                 for generation, if any.
         """
-        return self._process_request(inputs, sampling_params, request_id,
+        return self._process_request(prompt, sampling_params, request_id,
                                      lora_request, trace_headers,
                                      prompt_adapter_request)
 
     def encode(
         self,
-        inputs: PromptInputs,
+        prompt: PromptType,
         pooling_params: PoolingParams,
         request_id: str,
         lora_request: Optional[LoRARequest] = None,
@@ -418,8 +417,7 @@ def encode(
         from the LLMEngine to the caller.
 
         Args:
-            inputs: The inputs to the LLM. See
-                :class:`~vllm.inputs.PromptInputs`
+            prompt: The prompt to the LLM. See :class:`~vllm.inputs.PromptType`
                 for more details about the format of each input.
             pooling_params: The pooling parameters of the request.
             request_id: The unique id of the request.
@@ -430,12 +428,12 @@ def encode(
             The output `EmbeddingRequestOutput` objects from the LLMEngine
             for the request.
         """
-        return self._process_request(inputs, pooling_params, request_id,
+        return self._process_request(prompt, pooling_params, request_id,
                                      lora_request, trace_headers)
 
     async def _process_request(
         self,
-        inputs: PromptInputs,
+        prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
         request_id: str,
         lora_request: Optional[LoRARequest] = None,
@@ -468,7 +466,7 @@ async def _process_request(
 
         request_bytes = pickle.dumps(
             RPCProcessRequest(
-                inputs=inputs,
+                prompt=prompt,
                 params=params,
                 request_id=request_id,
                 lora_request=lora_request,

vllm/engine/multiprocessing/engine.py

Lines changed: 1 addition & 1 deletion
@@ -245,7 +245,7 @@ def _handle_process_request(self, request: RPCProcessRequest):
         try:
             self.engine.add_request(
                 request_id=request_id,
-                inputs=request.inputs,
+                prompt=request.prompt,
                 params=request.params,
                 lora_request=request.lora_request,
                 trace_headers=request.trace_headers,
