Skip to content

Commit 74a2ca8

Browse files
authored
fix: re-add litellm after accidental removal (#852)
In #848 I improved the Google ADK structure but mistakenly committed the removal of litellm. This PR re-adds the original contribution from #726 Closes ## By Submitting this PR I confirm: - I am familiar with the [Contributing Guidelines](https://github.com/NVIDIA/NeMo-Agent-Toolkit/blob/develop/docs/source/resources/contributing.md). - We require that all contributors "sign-off" on their commits. This certifies that the contribution is your original work, or you have rights to submit it under the same license, or a compatible license. - Any contribution which contains commits that are not Signed-Off will not be accepted. - When the PR is ready for review, new or existing tests cover these changes. - When the PR is ready for review, the documentation is up to date with these changes. ## Summary by CodeRabbit - New Features - Added LiteLLM as a selectable LLM provider. - Introduced configuration options including API key, base URL (api_base), model, temperature, top_p, and seed. - Enabled an ADK client endpoint for LiteLLM to use within workflows. - Chores - Provider auto-registration on import to simplify setup. Authors: - Will Killian (https://github.com/willkill07) Approvers: - David Gardner (https://github.com/dagardner-nv) URL: #852
1 parent 65bb853 commit 74a2ca8

File tree

3 files changed

+81
-0
lines changed

3 files changed

+81
-0
lines changed

packages/nvidia_nat_adk/src/nat/plugins/adk/llm.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
from nat.builder.framework_enum import LLMFrameworkEnum
2121
from nat.cli.register_workflow import register_llm_client
2222
from nat.llm.azure_openai_llm import AzureOpenAIModelConfig
23+
from nat.llm.litellm_llm import LiteLlmModelConfig
2324
from nat.llm.nim_llm import NIMModelConfig
2425
from nat.llm.openai_llm import OpenAIModelConfig
2526

@@ -47,6 +48,16 @@ async def azure_openai_adk(config: AzureOpenAIModelConfig, _builder: Builder):
4748
yield LiteLlm(f"azure/{config.azure_deployment}", **config_dict)
4849

4950

51+
@register_llm_client(config_type=LiteLlmModelConfig, wrapper_type=LLMFrameworkEnum.ADK)
async def litellm_adk(litellm_config: LiteLlmModelConfig, _builder: Builder):
    """Yield a Google ADK ``LiteLlm`` client built from a NAT ``LiteLlmModelConfig``.

    Args:
        litellm_config: The NAT LiteLLM provider configuration.
        _builder: The NAT builder instance (unused here).
    """
    # Imported lazily so the google-adk dependency is only required when an
    # ADK LiteLLM client is actually constructed.
    from google.adk.models.lite_llm import LiteLlm

    # Dump the config using serialization aliases (model_name -> model,
    # base_url -> api_base) and drop fields LiteLlm does not understand.
    client_kwargs = litellm_config.model_dump(
        by_alias=True,
        exclude={"type", "max_retries", "thinking"},
        exclude_none=True,
    )
    yield LiteLlm(**client_kwargs)
59+
60+
5061
@register_llm_client(config_type=NIMModelConfig, wrapper_type=LLMFrameworkEnum.ADK)
5162
async def nim_adk(config: NIMModelConfig, _builder: Builder):
5263
"""Create and yield a Google ADK `NIM` client from a NAT `NIMModelConfig`.

src/nat/llm/litellm_llm.py

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2+
# SPDX-License-Identifier: Apache-2.0
3+
#
4+
# Licensed under the Apache License, Version 2.0 (the "License");
5+
# you may not use this file except in compliance with the License.
6+
# You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
16+
from collections.abc import AsyncIterator
17+
18+
from pydantic import AliasChoices
19+
from pydantic import ConfigDict
20+
from pydantic import Field
21+
22+
from nat.builder.builder import Builder
23+
from nat.builder.llm import LLMProviderInfo
24+
from nat.cli.register_workflow import register_llm_provider
25+
from nat.data_models.llm import LLMBaseConfig
26+
from nat.data_models.retry_mixin import RetryMixin
27+
from nat.data_models.temperature_mixin import TemperatureMixin
28+
from nat.data_models.thinking_mixin import ThinkingMixin
29+
from nat.data_models.top_p_mixin import TopPMixin
30+
31+
32+
class LiteLlmModelConfig(
        LLMBaseConfig,
        RetryMixin,
        TemperatureMixin,
        TopPMixin,
        ThinkingMixin,
        name="litellm",
):
    """A LiteLlm provider to be used with an LLM client.

    Combines the base LLM config with retry, temperature, top-p and thinking
    settings; unknown extra fields are accepted and forwarded to LiteLLM.
    """

    # extra="allow" lets provider-specific LiteLLM options pass through;
    # clearing protected_namespaces permits the "model_name" field despite
    # pydantic's reserved "model_" prefix.
    model_config = ConfigDict(protected_namespaces=(), extra="allow")

    api_key: str | None = Field(default=None, description="API key to interact with hosted model.")
    # Serialized as "api_base" (LiteLLM's keyword); accepted under either name.
    base_url: str | None = Field(
        default=None,
        description="Base url to the hosted model.",
        validation_alias=AliasChoices("base_url", "api_base"),
        serialization_alias="api_base",
    )
    # Serialized as "model"; accepted as either "model_name" or "model".
    model_name: str = Field(
        description="The LiteLlm hosted model name.",
        validation_alias=AliasChoices("model_name", "model"),
        serialization_alias="model",
    )
    seed: int | None = Field(default=None, description="Random seed to set for generation.")
53+
54+
55+
@register_llm_provider(config_type=LiteLlmModelConfig)
async def litellm_model(
    config: LiteLlmModelConfig,
    _builder: Builder,
) -> AsyncIterator[LLMProviderInfo]:
    """Litellm model provider.

    Args:
        config (LiteLlmModelConfig): The LiteLlm model configuration.
        _builder (Builder): The NAT builder instance (unused here).

    Yields:
        LLMProviderInfo: Provider metadata wrapping the given configuration.
    """
    provider_info = LLMProviderInfo(
        config=config,
        description="A LiteLlm model for use with an LLM client.",
    )
    yield provider_info

src/nat/llm/register.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,5 +22,6 @@
2222
# Import any providers which need to be automatically registered here
2323
from . import aws_bedrock_llm
2424
from . import azure_openai_llm
25+
from . import litellm_llm
2526
from . import nim_llm
2627
from . import openai_llm

0 commit comments

Comments
 (0)