
Commit

Merge branch 'main' into declarative-mcp-tools
EItanya authored Jan 25, 2025
2 parents 95fc65e + 138913b commit 3c1a8ce
Showing 8 changed files with 88 additions and 31 deletions.
@@ -28,7 +28,7 @@
SystemMessage,
UserMessage,
)
-from autogen_core.tools import FunctionTool, Tool
+from autogen_core.tools import FunctionTool, BaseTool
from pydantic import BaseModel
from typing_extensions import Self

@@ -57,7 +57,7 @@ class AssistantAgentConfig(BaseModel):

name: str
model_client: ComponentModel
-# tools: List[Any] | None = None # TBD
+tools: List[ComponentModel] | None
handoffs: List[HandoffBase | str] | None = None
model_context: ComponentModel | None = None
description: str
@@ -130,7 +130,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
Args:
name (str): The name of the agent.
model_client (ChatCompletionClient): The model client to use for inference.
-tools (List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional): The tools to register with the agent.
+tools (List[BaseTool[Any, Any] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional): The tools to register with the agent.
handoffs (List[HandoffBase | str] | None, optional): The handoff configurations for the agent,
allowing it to transfer to other agents by responding with a :class:`HandoffMessage`.
The transfer is only executed when the team is in :class:`~autogen_agentchat.teams.Swarm`.
@@ -261,7 +261,7 @@ def __init__(
name: str,
model_client: ChatCompletionClient,
*,
-tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
+tools: List[BaseTool[Any, Any] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
handoffs: List[HandoffBase | str] | None = None,
model_context: ChatCompletionContext | None = None,
description: str = "An agent that provides assistance with ability to use tools.",
@@ -288,12 +288,12 @@ def __init__(
self._system_messages = []
else:
self._system_messages = [SystemMessage(content=system_message)]
-self._tools: List[Tool] = []
+self._tools: List[BaseTool[Any, Any]] = []
if tools is not None:
if model_client.model_info["function_calling"] is False:
raise ValueError("The model does not support function calling.")
for tool in tools:
-if isinstance(tool, Tool):
+if isinstance(tool, BaseTool):
self._tools.append(tool)
elif callable(tool):
if hasattr(tool, "__doc__") and tool.__doc__ is not None:
@@ -308,7 +308,7 @@ def __init__(
if len(tool_names) != len(set(tool_names)):
raise ValueError(f"Tool names must be unique: {tool_names}")
# Handoff tools.
-self._handoff_tools: List[Tool] = []
+self._handoff_tools: List[BaseTool[Any, Any]] = []
self._handoffs: Dict[str, HandoffBase] = {}
if handoffs is not None:
if model_client.model_info["function_calling"] is False:
@@ -528,15 +528,10 @@ async def load_state(self, state: Mapping[str, Any]) -> None:
def _to_config(self) -> AssistantAgentConfig:
"""Convert the assistant agent to a declarative config."""

-# raise an error if tools is not empty until it is implemented
-# TBD : Implement serializing tools and remove this check.
-if self._tools and len(self._tools) > 0:
-    raise NotImplementedError("Serializing tools is not implemented yet.")

return AssistantAgentConfig(
name=self.name,
model_client=self._model_client.dump_component(),
-# tools=[], # TBD
+tools=[tool.dump_component() for tool in self._tools],
handoffs=list(self._handoffs.values()),
model_context=self._model_context.dump_component(),
description=self.description,
@@ -553,7 +548,7 @@ def _from_config(cls, config: AssistantAgentConfig) -> Self:
return cls(
name=config.name,
model_client=ChatCompletionClient.load_component(config.model_client),
-# tools=[], # TBD
+tools=[BaseTool.load_component(tool) for tool in config.tools] if config.tools else None,
handoffs=config.handoffs,
model_context=None,
description=config.description,
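Taken together, these changes let an `AssistantAgent` with registered tools round-trip through a declarative config: each tool is serialized with `dump_component()` and restored with `BaseTool.load_component()`. A minimal sketch of the round trip (the model client and echo tool are illustrative, not part of this diff):

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_core.tools import FunctionTool
from autogen_ext.models.openai import OpenAIChatCompletionClient


def _echo(text: str) -> str:
    """Return the input text unchanged."""
    return text


agent = AssistantAgent(
    name="assistant",
    model_client=OpenAIChatCompletionClient(model="gpt-4o"),
    tools=[FunctionTool(_echo, description="Echo")],
)

config = agent.dump_component()  # tools are serialized as ComponentModel entries
restored = AssistantAgent.load_component(config)  # tools rebuilt via BaseTool.load_component
```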
@@ -1,7 +1,7 @@
import logging
from typing import Any, Dict

-from autogen_core.tools import FunctionTool, Tool
+from autogen_core.tools import FunctionTool, BaseTool
from pydantic import BaseModel, Field, model_validator

from .. import EVENT_LOGGER_NAME
@@ -47,7 +47,7 @@ def set_defaults(cls, values: Dict[str, Any]) -> Dict[str, Any]:
return values

@property
-def handoff_tool(self) -> Tool:
+def handoff_tool(self) -> BaseTool[BaseModel, BaseModel]:
"""Create a handoff tool from this handoff configuration."""

def _handoff_tool() -> str:
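The handoff tool itself is unchanged here; only its declared type is widened from `Tool` to `BaseTool[BaseModel, BaseModel]`. For illustration, a sketch of how the property is used (assuming the `Handoff` class exported from `autogen_agentchat.base`):

```python
from autogen_agentchat.base import Handoff

handoff = Handoff(target="planner")
tool = handoff.handoff_tool  # a FunctionTool, now typed as BaseTool[BaseModel, BaseModel]
print(tool.name)  # filled in by the set_defaults validator, e.g. "transfer_to_planner"
```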
@@ -774,5 +774,5 @@ async def test_assistant_agent_declarative(monkeypatch: pytest.MonkeyPatch) -> N
FunctionTool(_echo_function, description="Echo"),
],
)
-with pytest.raises(NotImplementedError):
-    agent3.dump_component()
+agent3_config = agent3.dump_component()
+assert agent3_config.provider == "autogen_agentchat.agents.AssistantAgent"
@@ -35,6 +35,7 @@ See each feature below for detailed information on how to migrate.

- [Model Client](#model-client)
- [Model Client for OpenAI-Compatible APIs](#model-client-for-openai-compatible-apis)
+- [Model Client Cache](#model-client-cache)
- [Assistant Agent](#assistant-agent)
- [Multi-Modal Agent](#multi-modal-agent)
- [User Proxy](#user-proxy)
@@ -59,8 +60,6 @@ See each feature below for detailed information on how to migrate.
The following features currently in `v0.2`
will be provided in the future releases of `v0.4.*` versions:

-- Model Client Cache [#4752](https://github.com/microsoft/autogen/issues/4752)
- Jupyter Code Executor [#4795](https://github.com/microsoft/autogen/issues/4795)
- Model Client Cost [#4835](https://github.com/microsoft/autogen/issues/4835)
- Teachable Agent
- RAG Agent
@@ -161,6 +160,65 @@ in AgentChat Tutorial and more detailed information on [Core API Docs](../core-u

Support for other hosted models will be added in the future.

## Model Client Cache

In `v0.2`, you can set the cache seed through the `cache_seed` parameter in the LLM config.
The cache is enabled by default.

```python
llm_config = {
"config_list": [{"model": "gpt-4o", "api_key": "sk-xxx"}],
"seed": 42,
"temperature": 0,
"cache_seed": 42,
}
```

In `v0.4`, the cache is not enabled by default; to use it, you need to wrap the model client in a
{py:class}`~autogen_ext.models.cache.ChatCompletionCache`.

You can use a {py:class}`~autogen_ext.cache_store.diskcache.DiskCacheStore` or {py:class}`~autogen_ext.cache_store.redis.RedisStore` to store the cache.

```bash
pip install -U "autogen-ext[openai, diskcache, redis]"
```

Here's an example of using `diskcache` for local caching:

```python
import asyncio
import tempfile

from autogen_core.models import UserMessage
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.models.cache import ChatCompletionCache, CHAT_CACHE_VALUE_TYPE
from autogen_ext.cache_store.diskcache import DiskCacheStore
from diskcache import Cache


async def main():
with tempfile.TemporaryDirectory() as tmpdirname:
# Initialize the original client
openai_model_client = OpenAIChatCompletionClient(model="gpt-4o")

# Then initialize the CacheStore, in this case with diskcache.Cache.
# You can also use redis like:
# from autogen_ext.cache_store.redis import RedisStore
# import redis
# redis_instance = redis.Redis()
        # cache_store = RedisStore[CHAT_CACHE_VALUE_TYPE](redis_instance)
cache_store = DiskCacheStore[CHAT_CACHE_VALUE_TYPE](Cache(tmpdirname))
cache_client = ChatCompletionCache(openai_model_client, cache_store)

response = await cache_client.create([UserMessage(content="Hello, how are you?", source="user")])
print(response) # Should print response from OpenAI
response = await cache_client.create([UserMessage(content="Hello, how are you?", source="user")])
print(response) # Should print cached response


asyncio.run(main())
```
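
The Redis variant sketched in the comments above, written out in full (a sketch assuming a Redis server reachable on localhost; `RedisStore` wraps the `redis.Redis` instance directly):

```python
import asyncio

import redis
from autogen_core.models import UserMessage
from autogen_ext.cache_store.redis import RedisStore
from autogen_ext.models.cache import CHAT_CACHE_VALUE_TYPE, ChatCompletionCache
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main():
    openai_model_client = OpenAIChatCompletionClient(model="gpt-4o")
    redis_instance = redis.Redis()  # assumes Redis is running locally
    cache_store = RedisStore[CHAT_CACHE_VALUE_TYPE](redis_instance)
    cache_client = ChatCompletionCache(openai_model_client, cache_store)

    response = await cache_client.create([UserMessage(content="Hello, how are you?", source="user")])
    print(response)  # first call goes to OpenAI; a repeat call is served from Redis


asyncio.run(main())
```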

## Assistant Agent

In `v0.2`, you create an assistant agent as follows:
@@ -1,8 +1,8 @@
import asyncio
import functools
+import warnings
from textwrap import dedent
from typing import Any, Callable, Sequence
-import warnings

from pydantic import BaseModel
from typing_extensions import Self
@@ -2,7 +2,8 @@

from autogen_agentchat.agents import AssistantAgent
from autogen_core.models import ChatCompletionClient
-from autogen_core.tools import Tool
+from autogen_core.tools import BaseTool
+from pydantic import BaseModel

from .tools import (
extract_audio,
@@ -38,7 +39,7 @@ class VideoSurfer(AssistantAgent):
Args:
name (str): The name of the agent.
model_client (ChatCompletionClient): The model client used for generating responses.
-tools (List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional):
+tools (List[BaseTool[BaseModel, BaseModel] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional):
A list of tools or functions the agent can use. If not provided, defaults to all video tools from the action space.
description (str, optional): A brief description of the agent. Defaults to "An agent that can answer questions about a local video.".
system_message (str | None, optional): The system message guiding the agent's behavior. Defaults to a predefined message.
@@ -137,7 +138,7 @@ def __init__(
name: str,
model_client: ChatCompletionClient,
*,
-tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
+tools: List[BaseTool[BaseModel, BaseModel] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
description: Optional[str] = None,
system_message: Optional[str] = None,
):
@@ -3,7 +3,7 @@
import warnings
from typing import Any, AsyncGenerator, List, Mapping, Optional, Sequence, Union, cast

-from autogen_core import CacheStore, CancellationToken
+from autogen_core import CacheStore, CancellationToken, InMemoryStore
from autogen_core.models import (
ChatCompletionClient,
CreateResult,
@@ -74,11 +74,16 @@ async def main():
client (ChatCompletionClient): The original ChatCompletionClient to wrap.
store (CacheStore): A store object that implements get and set methods.
The user is responsible for managing the store's lifecycle & clearing it (if needed).
+Defaults to using in-memory cache.
"""

-def __init__(self, client: ChatCompletionClient, store: CacheStore[CHAT_CACHE_VALUE_TYPE]):
+def __init__(
+    self,
+    client: ChatCompletionClient,
+    store: Optional[CacheStore[CHAT_CACHE_VALUE_TYPE]] = None,
+):
self.client = client
-self.store = store
+self.store = store or InMemoryStore[CHAT_CACHE_VALUE_TYPE]()

def _check_cache(
self,
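With the store now optional, callers can wrap a client without constructing a store up front; the cache falls back to a process-local `InMemoryStore`. A minimal sketch (the OpenAI client is illustrative):

```python
from autogen_ext.models.cache import ChatCompletionCache
from autogen_ext.models.openai import OpenAIChatCompletionClient

client = OpenAIChatCompletionClient(model="gpt-4o")
cached = ChatCompletionCache(client)  # store omitted: defaults to InMemoryStore[CHAT_CACHE_VALUE_TYPE]()
```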
@@ -2,15 +2,14 @@
from typing import List, Tuple, Union

import pytest
-from autogen_core import InMemoryStore
from autogen_core.models import (
ChatCompletionClient,
CreateResult,
LLMMessage,
SystemMessage,
UserMessage,
)
-from autogen_ext.models.cache import CHAT_CACHE_VALUE_TYPE, ChatCompletionCache
+from autogen_ext.models.cache import ChatCompletionCache
from autogen_ext.models.replay import ReplayChatCompletionClient


@@ -21,8 +20,7 @@ def get_test_data() -> Tuple[list[str], list[str], SystemMessage, ChatCompletion
system_prompt = SystemMessage(content="This is a system prompt")
replay_client = ReplayChatCompletionClient(responses)
replay_client.set_cached_bool_value(False)
-store = InMemoryStore[CHAT_CACHE_VALUE_TYPE]()
-cached_client = ChatCompletionCache(replay_client, store)
+cached_client = ChatCompletionCache(replay_client)

return responses, prompts, system_prompt, replay_client, cached_client

