Skip to content

Commit

Permalink
Merge branch 'main' into lpinheiro/feat/add-sk-model-adapter
Browse files Browse the repository at this point in the history
  • Loading branch information
lspinheiro authored Jan 13, 2025
2 parents db3258a + 9570e82 commit b5d115b
Show file tree
Hide file tree
Showing 33 changed files with 420 additions and 187 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/docs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ jobs:
[
# For main use the workflow target
{ ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13", sphinx-release-override: "dev" },
{ ref: "v0.4.0.post1", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" },
{ ref: "v0.4.1", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" },
{ ref: "v0.4.0.dev0", dest-dir: "0.4.0.dev0", uv-version: "0.5.11", sphinx-release-override: "" },
{ ref: "v0.4.0.dev1", dest-dir: "0.4.0.dev1", uv-version: "0.5.11", sphinx-release-override: "" },
{ ref: "v0.4.0.dev2", dest-dir: "0.4.0.dev2", uv-version: "0.5.11", sphinx-release-override: "" },
Expand All @@ -49,6 +49,7 @@ jobs:
{ ref: "v0.4.0.dev12", dest-dir: "0.4.0.dev12", uv-version: "0.5.13", sphinx-release-override: "" },
{ ref: "v0.4.0.dev13", dest-dir: "0.4.0.dev13", uv-version: "0.5.13", sphinx-release-override: "" },
{ ref: "v0.4.0.post1", dest-dir: "0.4.0", uv-version: "0.5.13", sphinx-release-override: "" },
{ ref: "v0.4.1", dest-dir: "0.4.1", uv-version: "0.5.13", sphinx-release-override: "" },
]
steps:
- name: Checkout
Expand Down
16 changes: 16 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,13 @@ pip install -U "autogen-agentchat" "autogen-ext[openai]"

The current stable version is v0.4. If you are upgrading from AutoGen v0.2, please refer to the [Migration Guide](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/migration-guide.html) for detailed instructions on how to update your code and configurations.

```bash
# Install AutoGen Studio for no-code GUI
pip install -U "autogenstudio"
```

## Quickstart

### Hello World

Create an assistant agent using OpenAI's GPT-4o model.
Expand Down Expand Up @@ -69,6 +76,15 @@ async def main() -> None:
asyncio.run(main())
```

### AutoGen Studio

Use AutoGen Studio to prototype and run multi-agent workflows without writing code.

```bash
# Run AutoGen Studio on http://localhost:8080
autogenstudio ui --port 8080 --appdir ./my-app
```

## Why Use AutoGen?

<div align="center">
Expand Down
7 changes: 6 additions & 1 deletion docs/switcher.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[
{
"name": "0.4.0 (stable)",
"name": "0.4.1 (stable)",
"version": "stable",
"url": "/autogen/stable/",
"preferred": true
Expand All @@ -15,6 +15,11 @@
"version": "0.2",
"url": "/autogen/0.2/"
},
{
"name": "0.4.0",
"version": "0.4.0",
"url": "/autogen/0.4.0/"
},
{
"name": "0.4.0.dev0",
"version": "0.4.0.dev0",
Expand Down
4 changes: 2 additions & 2 deletions python/packages/autogen-agentchat/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "autogen-agentchat"
version = "0.4.0"
version = "0.4.1"
license = {file = "LICENSE-CODE"}
description = "AutoGen agents and teams library"
readme = "README.md"
Expand All @@ -15,7 +15,7 @@ classifiers = [
"Operating System :: OS Independent",
]
dependencies = [
"autogen-core==0.4.0",
"autogen-core==0.4.1",
"aioconsole>=0.8.1"
]

Expand Down
Original file line number Diff line number Diff line change
@@ -1,15 +1,17 @@
import asyncio
import uuid
from contextlib import contextmanager
from contextvars import ContextVar
from inspect import iscoroutinefunction
from typing import Awaitable, Callable, Optional, Sequence, Union, cast
from typing import Any, AsyncGenerator, Awaitable, Callable, ClassVar, Generator, Optional, Sequence, Union, cast

from aioconsole import ainput # type: ignore
from autogen_core import CancellationToken

from ..base import Response
from ..messages import ChatMessage, HandoffMessage, TextMessage
from ..messages import AgentEvent, ChatMessage, HandoffMessage, TextMessage, UserInputRequestedEvent
from ._base_chat_agent import BaseChatAgent

# Define input function types more precisely
SyncInputFunc = Callable[[str], str]
AsyncInputFunc = Callable[[str, Optional[CancellationToken]], Awaitable[str]]
InputFuncType = Union[SyncInputFunc, AsyncInputFunc]
Expand Down Expand Up @@ -109,6 +111,33 @@ async def cancellable_user_agent():
print(f"BaseException: {e}")
"""

class InputRequestContext:
    """Static holder for the ID of the user input request currently being serviced.

    ``UserProxyAgent`` generates a fresh request ID, publishes a
    ``UserInputRequestedEvent`` carrying it, and then invokes the input
    callback inside :meth:`populate_context`; the callback can recover the
    ID via :meth:`request_id`.
    """

    def __init__(self) -> None:
        # This class is a namespace only; constructing it is always an error.
        raise RuntimeError(
            "InputRequestContext cannot be instantiated. It is a static class that provides context management for user input requests."
        )

    # Context variable carrying the active request ID; scoped to the current
    # (async) execution context, so concurrent agents do not interfere.
    _INPUT_REQUEST_CONTEXT_VAR: ClassVar[ContextVar[str]] = ContextVar("_INPUT_REQUEST_CONTEXT_VAR")

    @classmethod
    @contextmanager
    def populate_context(cls, ctx: str) -> Generator[None, Any, None]:
        """:meta private:"""
        # Access the var via `cls` for consistency with request_id() below.
        token = cls._INPUT_REQUEST_CONTEXT_VAR.set(ctx)
        try:
            yield
        finally:
            cls._INPUT_REQUEST_CONTEXT_VAR.reset(token)

    @classmethod
    def request_id(cls) -> str:
        """Return the ID of the user input request currently in flight.

        Raises:
            RuntimeError: If called outside the input callback of a UserProxyAgent.
        """
        try:
            return cls._INPUT_REQUEST_CONTEXT_VAR.get()
        except LookupError as e:
            # Fixed: the message previously referenced a nonexistent
            # InputRequestContext.runtime() method.
            raise RuntimeError(
                "InputRequestContext.request_id() must be called within the input callback of a UserProxyAgent."
            ) from e

def __init__(
self,
name: str,
Expand Down Expand Up @@ -153,9 +182,15 @@ async def _get_input(self, prompt: str, cancellation_token: Optional[Cancellatio
except Exception as e:
raise RuntimeError(f"Failed to get user input: {str(e)}") from e

async def on_messages(
self, messages: Sequence[ChatMessage], cancellation_token: Optional[CancellationToken] = None
) -> Response:
async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:
async for message in self.on_messages_stream(messages, cancellation_token):
if isinstance(message, Response):
return message
raise AssertionError("The stream should have returned the final result.")

async def on_messages_stream(
self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
"""Handle incoming messages by requesting user input."""
try:
# Check for handoff first
Expand All @@ -164,15 +199,18 @@ async def on_messages(
f"Handoff received from {handoff.source}. Enter your response: " if handoff else "Enter your response: "
)

user_input = await self._get_input(prompt, cancellation_token)
request_id = str(uuid.uuid4())

input_requested_event = UserInputRequestedEvent(request_id=request_id, source=self.name)
yield input_requested_event
with UserProxyAgent.InputRequestContext.populate_context(request_id):
user_input = await self._get_input(prompt, cancellation_token)

# Return appropriate message type based on handoff presence
if handoff:
return Response(
chat_message=HandoffMessage(content=user_input, target=handoff.source, source=self.name)
)
yield Response(chat_message=HandoffMessage(content=user_input, target=handoff.source, source=self.name))
else:
return Response(chat_message=TextMessage(content=user_input, source=self.name))
yield Response(chat_message=TextMessage(content=user_input, source=self.name))

except asyncio.CancelledError:
raise
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -103,25 +103,40 @@ class ToolCallSummaryMessage(BaseChatMessage):
type: Literal["ToolCallSummaryMessage"] = "ToolCallSummaryMessage"


class UserInputRequestedEvent(BaseAgentEvent):
    """An event signaling that the user proxy has requested user input. Published prior to invoking the input callback."""

    request_id: str
    """Identifier for the user input request."""

    content: Literal[""] = ""
    """Empty content for compat with consumers expecting a content field."""

    # Discriminator value used by the AgentEvent annotated union.
    type: Literal["UserInputRequestedEvent"] = "UserInputRequestedEvent"


ChatMessage = Annotated[
TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, Field(discriminator="type")
]
"""Messages for agent-to-agent communication only."""


AgentEvent = Annotated[ToolCallRequestEvent | ToolCallExecutionEvent, Field(discriminator="type")]
AgentEvent = Annotated[
ToolCallRequestEvent | ToolCallExecutionEvent | UserInputRequestedEvent, Field(discriminator="type")
]
"""Events emitted by agents and teams when they work, not used for agent-to-agent communication."""


__all__ = [
"AgentEvent",
"BaseMessage",
"TextMessage",
"ChatMessage",
"HandoffMessage",
"MultiModalMessage",
"StopMessage",
"HandoffMessage",
"ToolCallRequestEvent",
"TextMessage",
"ToolCallExecutionEvent",
"ToolCallRequestEvent",
"ToolCallSummaryMessage",
"ChatMessage",
"AgentEvent",
"UserInputRequestedEvent",
]
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,6 @@
This module implements utility classes for formatting/printing agent messages.
"""

from ._console import Console
from ._console import Console, UserInputManager

__all__ = ["Console"]
__all__ = ["Console", "UserInputManager"]
Original file line number Diff line number Diff line change
@@ -1,14 +1,17 @@
import asyncio
import os
import sys
import time
from typing import AsyncGenerator, List, Optional, TypeVar, cast
from inspect import iscoroutinefunction
from typing import AsyncGenerator, Awaitable, Callable, Dict, List, Optional, TypeVar, Union, cast

from aioconsole import aprint # type: ignore
from autogen_core import Image
from autogen_core import CancellationToken, Image
from autogen_core.models import RequestUsage

from autogen_agentchat.agents import UserProxyAgent
from autogen_agentchat.base import Response, TaskResult
from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage
from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, UserInputRequestedEvent


def _is_running_in_iterm() -> bool:
Expand All @@ -19,25 +22,76 @@ def _is_output_a_tty() -> bool:
return sys.stdout.isatty()


SyncInputFunc = Callable[[str], str]
AsyncInputFunc = Callable[[str, Optional[CancellationToken]], Awaitable[str]]
InputFuncType = Union[SyncInputFunc, AsyncInputFunc]

T = TypeVar("T", bound=TaskResult | Response)


class UserInputManager:
    """Synchronizes console rendering with ``UserProxyAgent`` input requests.

    ``Console`` calls :meth:`notify_event_received` when it observes a
    ``UserInputRequestedEvent`` in the stream; the wrapped input callback
    returned by :meth:`get_wrapped_callback` waits for that notification
    before invoking the underlying callback, so the prompt is not shown
    ahead of the rendered messages.
    """

    def __init__(self, callback: InputFuncType):
        # Per-request synchronization events, keyed by request ID.
        self.input_events: Dict[str, asyncio.Event] = {}
        self.callback = callback

    def get_wrapped_callback(self) -> AsyncInputFunc:
        """Return an async input function that defers to ``self.callback`` only
        after the matching request notification has been received."""

        async def user_input_func_wrapper(prompt: str, cancellation_token: Optional[CancellationToken]) -> str:
            # Get the ID of the request being serviced, then wait on its event,
            # creating the event first if the notification has not arrived yet.
            request_id = UserProxyAgent.InputRequestContext.request_id()
            if request_id in self.input_events:
                event = self.input_events[request_id]
            else:
                event = asyncio.Event()
                self.input_events[request_id] = event

            await event.wait()

            # The request is now being handled; drop its bookkeeping entry.
            del self.input_events[request_id]

            if iscoroutinefunction(self.callback):
                # Cast to AsyncInputFunc for proper typing.
                async_func = cast(AsyncInputFunc, self.callback)
                return await async_func(prompt, cancellation_token)
            else:
                # Sync callback: run in a worker thread so the event loop stays
                # responsive while waiting for user input.
                sync_func = cast(SyncInputFunc, self.callback)
                # Fixed: use get_running_loop(); get_event_loop() is deprecated
                # when called from within a coroutine.
                loop = asyncio.get_running_loop()
                return await loop.run_in_executor(None, sync_func, prompt)

        return user_input_func_wrapper

    def notify_event_received(self, request_id: str) -> None:
        """Record that the UserInputRequestedEvent for *request_id* was seen,
        releasing (or pre-releasing) the corresponding wrapper wait."""
        if request_id in self.input_events:
            self.input_events[request_id].set()
        else:
            # The notification can arrive before the wrapper starts waiting;
            # store an already-set event so the wrapper does not deadlock on
            # wait(). (Fixed: the event was previously left unset here.)
            event = asyncio.Event()
            event.set()
            self.input_events[request_id] = event

async def Console(
stream: AsyncGenerator[AgentEvent | ChatMessage | T, None],
*,
no_inline_images: bool = False,
output_stats: bool = True,
output_stats: bool = False,
user_input_manager: UserInputManager | None = None,
) -> T:
"""
Consumes the message stream from :meth:`~autogen_agentchat.base.TaskRunner.run_stream`
or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream` and renders the messages to the console.
Returns the last processed TaskResult or Response.
.. note::
`output_stats` is experimental and the stats may not be accurate.
It will be improved in future releases.
Args:
stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render.
This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`.
no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False.
output_stats (bool, optional): If True, will output a summary of the messages and inline token usage info. Defaults to True.
output_stats (bool, optional): (Experimental) If True, will output a summary of the messages and inline token usage info. Defaults to False.
Returns:
last_processed: A :class:`~autogen_agentchat.base.TaskResult` if the stream is from :meth:`~autogen_agentchat.base.TaskRunner.run_stream`
Expand All @@ -62,6 +116,7 @@ async def Console(
f"Duration: {duration:.2f} seconds\n"
)
await aprint(output, end="")

# mypy ignore
last_processed = message # type: ignore

Expand Down Expand Up @@ -91,9 +146,13 @@ async def Console(
f"Duration: {duration:.2f} seconds\n"
)
await aprint(output, end="")

# mypy ignore
last_processed = message # type: ignore

# We don't want to print UserInputRequestedEvent messages, we just use them to signal the user input event.
elif isinstance(message, UserInputRequestedEvent):
if user_input_manager is not None:
user_input_manager.notify_event_received(message.request_id)
else:
# Cast required for mypy to be happy
message = cast(AgentEvent | ChatMessage, message) # type: ignore
Expand Down
6 changes: 3 additions & 3 deletions python/packages/autogen-core/docs/src/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ A console-based multi-agent assistant for web and file-based tasks.
Built on AgentChat.

```bash
pip install magentic-one-cli
pip install -U magentic-one-cli
m1 "Find flights from Seattle to Paris and format the result in a table"
```

Expand All @@ -83,8 +83,8 @@ An app for prototyping and managing agents without writing code.
Built on AgentChat.

```bash
pip install autogenstudio
autogenstudio ui --port 8080
pip install -U autogenstudio
autogenstudio ui --port 8080 --appdir ./myapp
```

+++
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -80,5 +80,5 @@ pip install "autogen-ext[openai]"
If you are using Azure OpenAI with AAD authentication, you need to install the following:

```bash
pip install "autogen-ext[azure]==0.4.0.dev13"
pip install "autogen-ext[azure]"
```
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@
"source": [
"## Running a Team\n",
"\n",
"Let's calls the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method\n",
"Let's call the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method\n",
"to start the team with a task."
]
},
Expand Down
Loading

0 comments on commit b5d115b

Please sign in to comment.