40 changes: 37 additions & 3 deletions src/agents/extensions/models/litellm_model.py
@@ -32,6 +32,7 @@
 )
 from openai.types.chat.chat_completion_message_function_tool_call import Function
 from openai.types.responses import Response
+from openai.types.responses.tool_choice_function import ToolChoiceFunction
 
 from ... import _debug
 from ...agent_output import AgentOutputSchemaBase
@@ -367,15 +368,48 @@ async def _fetch_response(
         if isinstance(ret, litellm.types.utils.ModelResponse):
             return ret
 
+        # Convert tool_choice to the correct type for Response
+        # tool_choice can be a Literal, ToolChoiceFunction,
+        # dict from ChatCompletions Converter, or omit
+        response_tool_choice: Literal["auto", "required", "none"] | ToolChoiceFunction
+        if tool_choice is omit:
+            response_tool_choice = "auto"
+        elif isinstance(tool_choice, ToolChoiceFunction):
+            # Already a ToolChoiceFunction, use directly
+            response_tool_choice = tool_choice
+        elif isinstance(tool_choice, dict):
+            # Convert from ChatCompletions format dict to ToolChoiceFunction
+            # ChatCompletions Converter returns: {"type": "function", "function": {"name": "..."}}
+            func_data = tool_choice.get("function")
+            if (
+                tool_choice.get("type") == "function"
+                and func_data is not None
+                and isinstance(func_data, dict)
+            ):
+                tool_name = func_data.get("name")
+                if isinstance(tool_name, str) and tool_name:  # Ensure non-empty string
+                    response_tool_choice = ToolChoiceFunction(type="function", name=tool_name)
+                else:
+                    # Fallback to auto if name is missing or invalid
+                    response_tool_choice = "auto"
+            else:
+                # Fallback to auto if unexpected format
+                response_tool_choice = "auto"
+        elif tool_choice in ("auto", "required", "none"):
+            from typing import cast
+
+            response_tool_choice = cast(Literal["auto", "required", "none"], tool_choice)
+        else:
+            # Fallback to auto for any other case
+            response_tool_choice = "auto"
+
         response = Response(
             id=FAKE_RESPONSES_ID,
             created_at=time.time(),
             model=self.model,
             object="response",
             output=[],
-            tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
-            if tool_choice is not omit
-            else "auto",
+            tool_choice=response_tool_choice,
Contributor
I tested this, and it's still not fixed: response_tool_choice always ends up being "auto", even when I pass ModelSettings(tool_choice="my_tool").
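
For reference, a minimal repro along these lines shows it (sketch only; the model string, agent wiring, and tool name are placeholders, not taken from this thread):

# Hypothetical repro sketch: model string, agent name, and tool are placeholders.
import asyncio

from agents import Agent, ModelSettings, Runner, function_tool
from agents.extensions.models.litellm_model import LitellmModel


@function_tool
def my_tool() -> str:
    """Dummy tool the model is forced to call."""
    return "ok"


agent = Agent(
    name="repro",
    instructions="Always call the tool.",
    model=LitellmModel(model="openai/gpt-4o-mini"),
    model_settings=ModelSettings(tool_choice="my_tool"),
    tools=[my_tool],
)

# Before the follow-up fix, the Response assembled in _fetch_response reported
# tool_choice="auto" even though "my_tool" was requested above.
asyncio.run(Runner.run(agent, "Call the tool."))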

Contributor Author
Thanks for your test, I will test it again later!

Contributor Author
✅ Fixed in commit fca3ed5 and verified with integration testing.

Root cause: The initial fix incorrectly assumed LiteLLM uses openai_responses.Converter (flat format), but it actually uses chatcmpl_converter.Converter, which returns the nested ChatCompletions format.

The fix: Now correctly handles the nested dict structure {"type": "function", "function": {"name": "my_tool"}} by accessing tool_choice.get("function").get("name") (lines 382-393).
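
For concreteness, the two tool_choice shapes involved look like this (a sketch, not code from the PR):

from openai.types.responses.tool_choice_function import ToolChoiceFunction

# Flat Responses-API shape the initial fix expected:
flat = {"type": "function", "name": "my_tool"}

# Nested ChatCompletions shape that chatcmpl_converter.Converter actually produces:
nested = {"type": "function", "function": {"name": "my_tool"}}

# The follow-up fix reads the nested shape and builds the Responses-side object:
tool_choice = ToolChoiceFunction(type="function", name=nested["function"]["name"])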

Verification: An integration test confirms that when ModelSettings(tool_choice="my_specific_tool") is passed, litellm.acompletion receives the correct nested dict format, and Response.tool_choice is properly set to ToolChoiceFunction(name="my_specific_tool").

Test output:

litellm.acompletion called with tool_choice: {'type': 'function', 'function': {'name': 'my_specific_tool'}}

The fix is now working correctly!
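
For anyone who wants to sanity-check the fallback rules without calling LiteLLM, here is a hypothetical standalone restatement of the conversion (the helper name is invented; the logic mirrors the diff above):

from __future__ import annotations

from typing import Literal

from openai.types.responses.tool_choice_function import ToolChoiceFunction


def to_response_tool_choice(
    tool_choice: object,
) -> Literal["auto", "required", "none"] | ToolChoiceFunction:
    # Mirrors the branches added in _fetch_response.
    if isinstance(tool_choice, ToolChoiceFunction):
        return tool_choice
    if isinstance(tool_choice, dict) and tool_choice.get("type") == "function":
        func = tool_choice.get("function")
        if isinstance(func, dict) and isinstance(func.get("name"), str) and func["name"]:
            return ToolChoiceFunction(type="function", name=func["name"])
        return "auto"  # missing or empty name: fall back to auto
    if tool_choice in ("auto", "required", "none"):
        return tool_choice  # type: ignore[return-value]
    return "auto"  # omit or anything unexpected: fall back to auto


# The nested ChatCompletions dict maps to a concrete ToolChoiceFunction ...
assert to_response_tool_choice(
    {"type": "function", "function": {"name": "my_specific_tool"}}
) == ToolChoiceFunction(type="function", name="my_specific_tool")
# ... plain literals pass through, and malformed dicts fall back to "auto".
assert to_response_tool_choice("required") == "required"
assert to_response_tool_choice({"type": "function"}) == "auto"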

             top_p=model_settings.top_p,
             temperature=model_settings.temperature,
             tools=[],