diff --git a/README.md b/README.md index 807f13077b29..179067fe9106 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Company?style=flat&logo=linkedin&logoColor=white)](https://www.linkedin.com/company/105812540) [![Discord](https://img.shields.io/badge/discord-chat-green?logo=discord)](https://aka.ms/autogen-discord) [![Documentation](https://img.shields.io/badge/Documentation-AutoGen-blue?logo=read-the-docs)](https://microsoft.github.io/autogen/) + # AutoGen @@ -15,6 +16,8 @@ ## Installation +AutoGen requires **Python 3.10 or later**. + ```bash # Install AgentChat and OpenAI client from Extensions pip install -U "autogen-agentchat" "autogen-ext[openai]" @@ -22,6 +25,13 @@ pip install -U "autogen-agentchat" "autogen-ext[openai]" The current stable version is v0.4. If you are upgrading from AutoGen v0.2, please refer to the [Migration Guide](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/migration-guide.html) for detailed instructions on how to update your code and configurations. +```bash +# Install AutoGen Studio for no-code GUI +pip install -U "autogenstudio" +``` + +## Quickstart + ### Hello World Create an assistant agent using OpenAI's GPT-4o model. @@ -33,7 +43,7 @@ from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o")) - print(agent.run(task="Say 'Hello World!'")) + print(await agent.run(task="Say 'Hello World!'")) asyncio.run(main()) ``` @@ -66,6 +76,15 @@ async def main() -> None: asyncio.run(main()) ``` +### AutoGen Studio + +Use AutoGen Studio to prototype and run multi-agent workflows without writing code. + +```bash +# Run AutoGen Studio on http://localhost:8080 +autogenstudio ui --port 8080 --appdir ./my-app +``` + ## Why Use AutoGen?
@@ -74,13 +93,13 @@ asyncio.run(main()) The AutoGen ecosystem provides everything you need to create AI agents, especially multi-agent workflows -- framework, developer tools, and applications. -The *framework* uses a layered and extensible design. Layers have clearly divided responsibilities and build on top of layers below. This design enables you to use the framework at different levels of abstraction, from high-level APIs to low-level components. +The _framework_ uses a layered and extensible design. Layers have clearly divided responsibilities and build on top of layers below. This design enables you to use the framework at different levels of abstraction, from high-level APIs to low-level components. - [Core API](./python/packages/autogen-core/) implements message passing, event-driven agents, and local and distributed runtime for flexibility and power. It also support cross-language support for .NET and Python. - [AgentChat API](./python/packages/autogen-agentchat/) implements a simpler but opinionated API rapid for prototyping. This API is built on top of the Core API and is closest to what users of v0.2 are familiar with and supports familiar multi-agent patterns such as two-agent chat or group chats. - [Extensions API](./python/packages/autogen-ext/) enables first- and third-party extensions continuously expanding framework capabilities. It support specific implementation of LLM clients (e.g., OpenAI, AzureOpenAI), and capabilities such as code execution. -The ecosystem also supports two essential *developer tools*: +The ecosystem also supports two essential _developer tools_:
AutoGen Studio Screenshot @@ -97,17 +116,17 @@ With AutoGen you get to join and contribute to a thriving ecosystem. We host wee
-| | [![Python](https://img.shields.io/badge/AutoGen-Python-blue?logo=python&logoColor=white)](./python) | [![.NET](https://img.shields.io/badge/AutoGen-.NET-green?logo=.net&logoColor=white)](./dotnet) | [![Studio](https://img.shields.io/badge/AutoGen-Studio-purple?logo=visual-studio&logoColor=white)](./python/packages/autogen-studio) | -|----------------------|--------------------------------------------------------------------------------------------|-------------------|-------------------| -| Installation | [![Installation](https://img.shields.io/badge/Install-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/installation.html) | * | [![Install](https://img.shields.io/badge/Install-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/installation.html) | -| Quickstart | [![Quickstart](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/quickstart.html#) | * | * | -| Tutorial | [![Tutorial](https://img.shields.io/badge/Tutorial-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/tutorial/models.html) | *| * | -| API Reference | [![API](https://img.shields.io/badge/Docs-blue)](https://microsoft.github.io/autogen/dev/reference/index.html#) | * | [![API](https://img.shields.io/badge/Docs-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html) | -| Packages | [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/)
[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/)
[![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) | * | [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-purple?logo=pypi)](https://pypi.org/project/autogenstudio/) | +| | [![Python](https://img.shields.io/badge/AutoGen-Python-blue?logo=python&logoColor=white)](./python) | [![.NET](https://img.shields.io/badge/AutoGen-.NET-green?logo=.net&logoColor=white)](./dotnet) | [![Studio](https://img.shields.io/badge/AutoGen-Studio-purple?logo=visual-studio&logoColor=white)](./python/packages/autogen-studio) | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Installation | [![Installation](https://img.shields.io/badge/Install-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/installation.html) | \* | [![Install](https://img.shields.io/badge/Install-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/installation.html) | +| Quickstart | [![Quickstart](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/quickstart.html#) | \* | [![Usage](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html#) | +| Tutorial | 
[![Tutorial](https://img.shields.io/badge/Tutorial-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/tutorial/models.html) | \* | [![Usage](https://img.shields.io/badge/Usage-blue)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html#) | +| API Reference | [![API](https://img.shields.io/badge/Docs-blue)](https://microsoft.github.io/autogen/dev/reference/index.html#) | \* | [![API](https://img.shields.io/badge/Docs-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html) | +| Packages | [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/)
[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/)
[![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) | \* | [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-purple?logo=pypi)](https://pypi.org/project/autogenstudio/) |
-**Releasing soon* +\*_Releasing soon_ Interested in contributing? See [CONTRIBUTING.md](./CONTRIBUTING.md) for guidelines on how to get started. We welcome contributions of all kinds, including bug fixes, new features, and documentation improvements. Join our community and help us make AutoGen better! diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index 2ad9a24682f0..89e0b61a50ee 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -1,15 +1,17 @@ import asyncio +import uuid +from contextlib import contextmanager +from contextvars import ContextVar from inspect import iscoroutinefunction -from typing import Awaitable, Callable, Optional, Sequence, Union, cast +from typing import Any, AsyncGenerator, Awaitable, Callable, ClassVar, Generator, Optional, Sequence, Union, cast from aioconsole import ainput # type: ignore from autogen_core import CancellationToken from ..base import Response -from ..messages import ChatMessage, HandoffMessage, TextMessage +from ..messages import AgentEvent, ChatMessage, HandoffMessage, TextMessage, UserInputRequestedEvent from ._base_chat_agent import BaseChatAgent -# Define input function types more precisely SyncInputFunc = Callable[[str], str] AsyncInputFunc = Callable[[str, Optional[CancellationToken]], Awaitable[str]] InputFuncType = Union[SyncInputFunc, AsyncInputFunc] @@ -109,6 +111,33 @@ async def cancellable_user_agent(): print(f"BaseException: {e}") """ + class InputRequestContext: + def __init__(self) -> None: + raise RuntimeError( + "InputRequestContext cannot be instantiated. It is a static class that provides context management for user input requests." 
+ ) + + _INPUT_REQUEST_CONTEXT_VAR: ClassVar[ContextVar[str]] = ContextVar("_INPUT_REQUEST_CONTEXT_VAR") + + @classmethod + @contextmanager + def populate_context(cls, ctx: str) -> Generator[None, Any, None]: + """:meta private:""" + token = UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.set(ctx) + try: + yield + finally: + UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.reset(token) + + @classmethod + def request_id(cls) -> str: + try: + return cls._INPUT_REQUEST_CONTEXT_VAR.get() + except LookupError as e: + raise RuntimeError( + "InputRequestContext.runtime() must be called within the input callback of a UserProxyAgent." + ) from e + def __init__( self, name: str, @@ -153,9 +182,15 @@ async def _get_input(self, prompt: str, cancellation_token: Optional[Cancellatio except Exception as e: raise RuntimeError(f"Failed to get user input: {str(e)}") from e - async def on_messages( - self, messages: Sequence[ChatMessage], cancellation_token: Optional[CancellationToken] = None - ) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async for message in self.on_messages_stream(messages, cancellation_token): + if isinstance(message, Response): + return message + raise AssertionError("The stream should have returned the final result.") + + async def on_messages_stream( + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: """Handle incoming messages by requesting user input.""" try: # Check for handoff first @@ -164,15 +199,18 @@ async def on_messages( f"Handoff received from {handoff.source}. 
Enter your response: " if handoff else "Enter your response: " ) - user_input = await self._get_input(prompt, cancellation_token) + request_id = str(uuid.uuid4()) + + input_requested_event = UserInputRequestedEvent(request_id=request_id, source=self.name) + yield input_requested_event + with UserProxyAgent.InputRequestContext.populate_context(request_id): + user_input = await self._get_input(prompt, cancellation_token) # Return appropriate message type based on handoff presence if handoff: - return Response( - chat_message=HandoffMessage(content=user_input, target=handoff.source, source=self.name) - ) + yield Response(chat_message=HandoffMessage(content=user_input, target=handoff.source, source=self.name)) else: - return Response(chat_message=TextMessage(content=user_input, source=self.name)) + yield Response(chat_message=TextMessage(content=user_input, source=self.name)) except asyncio.CancelledError: raise diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 07fc3123eb4c..21fb32d9d584 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -103,25 +103,40 @@ class ToolCallSummaryMessage(BaseChatMessage): type: Literal["ToolCallSummaryMessage"] = "ToolCallSummaryMessage" +class UserInputRequestedEvent(BaseAgentEvent): + """An event signaling a that the user proxy has requested user input. 
Published prior to invoking the input callback.""" + + request_id: str + """Identifier for the user input request.""" + + content: Literal[""] = "" + """Empty content for compat with consumers expecting a content field.""" + + type: Literal["UserInputRequestedEvent"] = "UserInputRequestedEvent" + + ChatMessage = Annotated[ TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, Field(discriminator="type") ] """Messages for agent-to-agent communication only.""" -AgentEvent = Annotated[ToolCallRequestEvent | ToolCallExecutionEvent, Field(discriminator="type")] +AgentEvent = Annotated[ + ToolCallRequestEvent | ToolCallExecutionEvent | UserInputRequestedEvent, Field(discriminator="type") +] """Events emitted by agents and teams when they work, not used for agent-to-agent communication.""" __all__ = [ + "AgentEvent", "BaseMessage", - "TextMessage", + "ChatMessage", + "HandoffMessage", "MultiModalMessage", "StopMessage", - "HandoffMessage", - "ToolCallRequestEvent", + "TextMessage", "ToolCallExecutionEvent", + "ToolCallRequestEvent", "ToolCallSummaryMessage", - "ChatMessage", - "AgentEvent", + "UserInputRequestedEvent", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/__init__.py index 65c4f1e07ad9..9cc0837c58c2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/__init__.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/__init__.py @@ -2,6 +2,6 @@ This module implements utility classes for formatting/printing agent messages. 
""" -from ._console import Console +from ._console import Console, UserInputManager -__all__ = ["Console"] +__all__ = ["Console", "UserInputManager"] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 79d39d6add7f..767dc68d8b4e 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -1,14 +1,17 @@ +import asyncio import os import sys import time -from typing import AsyncGenerator, List, Optional, TypeVar, cast +from inspect import iscoroutinefunction +from typing import AsyncGenerator, Awaitable, Callable, Dict, List, Optional, TypeVar, Union, cast from aioconsole import aprint # type: ignore -from autogen_core import Image +from autogen_core import CancellationToken, Image from autogen_core.models import RequestUsage +from autogen_agentchat.agents import UserProxyAgent from autogen_agentchat.base import Response, TaskResult -from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, UserInputRequestedEvent def _is_running_in_iterm() -> bool: @@ -19,25 +22,76 @@ def _is_output_a_tty() -> bool: return sys.stdout.isatty() +SyncInputFunc = Callable[[str], str] +AsyncInputFunc = Callable[[str, Optional[CancellationToken]], Awaitable[str]] +InputFuncType = Union[SyncInputFunc, AsyncInputFunc] + T = TypeVar("T", bound=TaskResult | Response) +class UserInputManager: + def __init__(self, callback: InputFuncType): + self.input_events: Dict[str, asyncio.Event] = {} + self.callback = callback + + def get_wrapped_callback(self) -> AsyncInputFunc: + async def user_input_func_wrapper(prompt: str, cancellation_token: Optional[CancellationToken]) -> str: + # Lookup the event for the prompt, if it exists wait for it. 
+ # If it doesn't exist, create it and store it. + # Get request ID: + request_id = UserProxyAgent.InputRequestContext.request_id() + if request_id in self.input_events: + event = self.input_events[request_id] + else: + event = asyncio.Event() + self.input_events[request_id] = event + + await event.wait() + + del self.input_events[request_id] + + if iscoroutinefunction(self.callback): + # Cast to AsyncInputFunc for proper typing + async_func = cast(AsyncInputFunc, self.callback) + return await async_func(prompt, cancellation_token) + else: + # Cast to SyncInputFunc for proper typing + sync_func = cast(SyncInputFunc, self.callback) + loop = asyncio.get_event_loop() + return await loop.run_in_executor(None, sync_func, prompt) + + return user_input_func_wrapper + + def notify_event_received(self, request_id: str) -> None: + if request_id in self.input_events: + self.input_events[request_id].set() + else: + event = asyncio.Event() + self.input_events[request_id] = event + + async def Console( stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], *, no_inline_images: bool = False, - output_stats: bool = True, + output_stats: bool = False, + user_input_manager: UserInputManager | None = None, ) -> T: """ Consumes the message stream from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream` and renders the messages to the console. Returns the last processed TaskResult or Response. + .. note:: + + `output_stats` is experimental and the stats may not be accurate. + It will be improved in future releases. + Args: stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render. This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. 
Use this to disable this behavior. Defaults to False. - output_stats (bool, optional): If True, will output a summary of the messages and inline token usage info. Defaults to True. + output_stats (bool, optional): (Experimental) If True, will output a summary of the messages and inline token usage info. Defaults to False. Returns: last_processed: A :class:`~autogen_agentchat.base.TaskResult` if the stream is from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` @@ -62,6 +116,7 @@ async def Console( f"Duration: {duration:.2f} seconds\n" ) await aprint(output, end="") + # mypy ignore last_processed = message # type: ignore @@ -91,9 +146,13 @@ async def Console( f"Duration: {duration:.2f} seconds\n" ) await aprint(output, end="") + # mypy ignore last_processed = message # type: ignore - + # We don't want to print UserInputRequestedEvent messages, we just use them to signal the user input event. + elif isinstance(message, UserInputRequestedEvent): + if user_input_manager is not None: + user_input_manager.notify_event_received(message.request_id) else: # Cast required for mypy to be happy message = cast(AgentEvent | ChatMessage, message) # type: ignore diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index e62b398dce58..f19fd42490c3 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -84,7 +84,7 @@ Built on AgentChat. ```bash pip install autogenstudio -autogenstudio ui --port 8080 +autogenstudio ui --port 8080 --appdir ./myapp ``` +++ @@ -109,7 +109,7 @@ Get Started
A programming framework for building conversational single and multi-agent applications. -Built on Core. +Built on Core. Requires Python 3.10+. ```python # pip install -U "autogen-agentchat" "autogen-ext[openai]" @@ -119,7 +119,7 @@ from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o")) - print(agent.run(task="Say 'Hello World!'")) + print(await agent.run(task="Say 'Hello World!'")) asyncio.run(main()) ``` diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb index ce0f39664158..d12a273edbda 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb @@ -83,7 +83,7 @@ "source": [ "## Running a Team\n", "\n", - "Let's calls the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method\n", + "Let's call the {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method\n", "to start the team with a task." 
] }, diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md index 4582657bc24e..09de3f9ac14f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md +++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md @@ -49,15 +49,6 @@ AutoGen Studio offers four main interfaces to help you build and manage multi-ag - Setup and test endpoints based on a team configuration - Run teams in a docker container -This revision improves clarity by: - -- Organizing capabilities into clearly numbered sections -- Using more precise language -- Breaking down complex features into digestible points -- Maintaining consistent formatting and structure -- Eliminating awkward phrasing and grammatical issues -- Adding context about how each interface serves users - ### Roadmap Review project roadmap and issues [here](https://github.com/microsoft/autogen/issues/4006) . diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/installation.md index 2ebc167213d2..2ca91af58251 100644 --- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/installation.md @@ -9,35 +9,83 @@ myst: There are two ways to install AutoGen Studio - from PyPi or from source. We **recommend installing from PyPi** unless you plan to modify the source code. -1. **Install from PyPi** +## Create a Virtual Environment (Recommended) - We recommend using a virtual environment (e.g., conda) to avoid conflicts with existing Python packages. 
With Python 3.10 or newer active in your virtual environment, use pip to install AutoGen Studio: +We recommend using a virtual environment as this will ensure that the dependencies for AutoGen Studio are isolated from the rest of your system. - ```bash - pip install -U autogenstudio - ``` +``````{tab-set} -2. **Install from Source** +`````{tab-item} venv - > Note: This approach requires some familiarity with building interfaces in React. +Create and activate: - If you prefer to install from source, ensure you have Python 3.10+ and Node.js (version above 14.15.0) installed. Here's how you get started: +```bash +python3 -m venv .venv +source .venv/bin/activate +``` + +To deactivate later, run: + +```bash +deactivate +``` + +````` + +`````{tab-item} conda + +[Install Conda](https://docs.conda.io/projects/conda/en/stable/user-guide/install/index.html) if you have not already. + + +Create and activate: + +```bash +conda create -n autogen python=3.10 +conda activate autogen +``` + +To deactivate later, run: + +```bash +conda deactivate +``` + + +````` + + + +`````` + +## Install Using pip (Recommended) + +You can install AutoGen Studio using pip, the Python package manager. + +```bash +pip install -U autogenstudio +``` + +### Install from Source\*\* + +> Note: This approach requires some familiarity with building interfaces in React. + +If you prefer to install from source, ensure you have Python 3.10+ and Node.js (version above 14.15.0) installed. Here's how you get started: - - Clone the AutoGen Studio repository and install its Python dependencies: +- Clone the AutoGen Studio repository and install its Python dependencies: - ```bash - pip install -e . - ``` + ```bash + pip install -e . 
+ ``` - - Navigate to the `samples/apps/autogen-studio/frontend` directory, install dependencies, and build the UI: +- Navigate to the `samples/apps/autogen-studio/frontend` directory, install dependencies, and build the UI: - ```bash - npm install -g gatsby-cli - npm install --global yarn - cd frontend - yarn install - yarn build - ``` + ```bash + npm install -g gatsby-cli + npm install --global yarn + cd frontend + yarn install + yarn build + ``` For Windows users, to build the frontend, you may need alternative commands to build the frontend. @@ -47,7 +95,7 @@ For Windows users, to build the frontend, you may need alternative commands to b ``` -### Running the Application +## Running the Application Once installed, run the web UI by entering the following in a terminal: @@ -62,8 +110,8 @@ AutoGen Studio also takes several parameters to customize the application: - `--host ` argument to specify the host address. By default, it is set to `localhost`. - `--appdir ` argument to specify the directory where the app files (e.g., database and generated user files) are stored. By default, it is set to the a `.autogenstudio` directory in the user's home directory. - `--port ` argument to specify the port number. By default, it is set to `8080`. -- `--upgrade-database` argument to upgrade the database schema (assuming there are changes in the version you are installing). By default, it is set to `False`. +- `--upgrade-database` argument to force-upgrade it's internal database schema (assuming there are changes in the version you are installing). By default, it is set to `False`. - `--reload` argument to enable auto-reloading of the server when changes are made to the code. By default, it is set to `False`. -- `--database-uri` argument to specify the database URI. Example values include `sqlite:///database.sqlite` for SQLite and `postgresql+psycopg://user:password@localhost/dbname` for PostgreSQL. 
If this is not specified, the database URI defaults to a `database.sqlite` file in the `--appdir` directory. +- `--database-uri` argument to specify the database URI. Example values include `sqlite:///database.sqlite` for SQLite and `postgresql+psycopg://user:password@localhost/dbname` for PostgreSQL. If this is not specified, the database URI defaults to a `autogen.db` file in the `--appdir` directory. Now that you have AutoGen Studio installed and running, you are ready to explore its capabilities, including defining and modifying agent workflows, interacting with agents and sessions, and expanding agent skills. diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md index 12a409e157df..fa88712c1971 100644 --- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md +++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md @@ -7,51 +7,103 @@ myst: # Usage -The expected usage behavior is that developers use the provided Team Builder interface to to define teams - create agents, attach tools and models to agents, and define termination conditions. Once the team is defined, users can run the team in the Playground to interact with the team to accomplish tasks. +AutoGen Studio provides a Team Builder interface where developers can define multiple components and behaviors. Users can create teams, add agents to teams, attach tools and models to agents, and define team termination conditions. +After defining a team, users can test it in the Playground view to accomplish various tasks through direct interaction. ![AutoGen Studio](https://media.githubusercontent.com/media/microsoft/autogen/refs/heads/main/python/packages/autogen-studio/docs/ags_screen.png) +## Declarative Specification of Componenents + +AutoGen Studio uses a declarative specification system to build its GUI components. 
At runtime, the AGS API loads these specifications into AutoGen AgentChat objects to address tasks. + +Here's an example of a declarative team specification: + +```json +{ + "version": "1.0.0", + "component_type": "team", + "name": "sample_team", + "participants": [ + { + "component_type": "agent", + "name": "assistant_agent", + "agent_type": "AssistantAgent", + "system_message": "You are a helpful assistant. Solve tasks carefully. When done respond with TERMINATE", + "model_client": { + "component_type": "model", + "model": "gpt-4o-2024-08-06", + "model_type": "OpenAIChatCompletionClient" + }, + "tools": [] + } + ], + "team_type": "RoundRobinGroupChat", + "termination_condition": { + "component_type": "termination", + "termination_type": "MaxMessageTermination", + "max_messages": 3 + } +} +``` + +This example shows a team with a single agent, using the `RoundRobinGroupChat` type and a `MaxMessageTermination` condition limited to 3 messages. + +```{note} +Work is currently in progress to make the entire AgentChat API declarative. This will allow all agentchat components to be `dumped` into the same declarative specification format used by AGS. +``` + ## Building an Agent Team -AutoGen Studio is tied very closely with all of the component abstractions provided by AutoGen AgentChat. This includes - {py:class}`~autogen_agentchat.teams`, {py:class}`~autogen_agentchat.agents`, {py:class}`~autogen_core.models`, {py:class}`~autogen_core.tools`, termination {py:class}`~autogen_agentchat.conditions`. +
-Users can define these components in the Team Builder interface either via a declarative specification or by dragging and dropping components from a component library. +
-## Interactively Running Teams +AutoGen Studio integrates closely with all component abstractions provided by AutoGen AgentChat, including {py:class}`~autogen_agentchat.teams`, {py:class}`~autogen_agentchat.agents`, {py:class}`~autogen_core.models`, {py:class}`~autogen_core.tools`, and termination {py:class}`~autogen_agentchat.conditions`. -AutoGen Studio Playground allows users to interactively test teams on tasks and review resulting artifacts (such as images, code, and text). +The Team Builder interface allows users to define components through either declarative specification or drag-and-drop functionality: -Users can also review the “inner monologue” of team as they address tasks, and view profiling information such as costs associated with the run (such as number of turns, number of tokens etc.), and agent actions (such as whether tools were called and the outcomes of code execution). +Team Builder Operations: -## Importing and Reusing Team Configurations +- Create a new team + - Edit Team JSON directly (toggle visual builder mode off) or + - Use the visual builder, drag-and-drop components from the library: + - Teams: Add agents and termination conditions + - Agents: Add models and tools +- Save team configurations -AutoGen Studio provides a Gallery view which provides a built-in default gallery. A Gallery is simply is a collection of components - teams, agents, models tools etc. Furthermore, users can import components from 3rd party community sources either by providing a URL to a JSON Gallery spec or pasting in the gallery JSON. This allows users to reuse and share team configurations with others. +Component Library Management: -- Gallery -> New Gallery -> Import -- Set as default gallery (in side bar, by clicking pin icon) -- Reuse components in Team Builder. 
Team Builder -> Sidebar -> From Gallery +- Create new galleries via Gallery -> New Gallery +- Edit gallery JSON as needed +- Set a **default** gallery (click pin icon in sidebar) to make components available in Team Builder -### Using AutoGen Studio Teams in a Python Application +## Interactively Running Teams -An exported team can be easily integrated into any Python application using the `TeamManager` class with just two lines of code. Underneath, the `TeamManager` rehydrates the team specification into AutoGen AgentChat agents that are subsequently used to address tasks. +The AutoGen Studio Playground enables users to: -```python +- Test teams on specific tasks +- Review generated artifacts (images, code, text) +- Monitor team "inner monologue" during task execution +- View performance metrics (turn count, token usage) +- Track agent actions (tool usage, code execution results) -from autogenstudio.teammanager import TeamManager +## Importing and Reusing Team Configurations -tm = TeamManager() -result_stream = tm.run(task="What is the weather in New York?", team_config="team.json") # or wm.run_stream(..) +AutoGen Studio's Gallery view offers a default component collection and supports importing external configurations: -``` +- Create/Import galleries through Gallery -> New Gallery -> Import +- Set default galleries via sidebar pin icon +- Access components in Team Builder through Sidebar -> From Gallery -To export a team configuration, click on the export button in the Team Builder interface. This will generate a JSON file that can be used to rehydrate the team in a Python application. +### Python Integration - +To export team configurations, use the export button in Team Builder to generate a JSON file for Python application use. 
diff --git a/python/packages/autogen-core/src/autogen_core/_component_config.py b/python/packages/autogen-core/src/autogen_core/_component_config.py index f5426428c90c..1045282921f2 100644 --- a/python/packages/autogen-core/src/autogen_core/_component_config.py +++ b/python/packages/autogen-core/src/autogen_core/_component_config.py @@ -2,7 +2,7 @@ import importlib import warnings -from typing import Any, ClassVar, Dict, Generic, Literal, Protocol, Type, cast, overload, runtime_checkable +from typing import Any, ClassVar, Dict, Generic, List, Literal, Protocol, Type, cast, overload, runtime_checkable from pydantic import BaseModel from typing_extensions import Self, TypeVar @@ -243,9 +243,9 @@ def _from_config(cls, config: Config) -> MyComponent: return cls(value=config.value) """ - required_class_vars = ["component_config_schema", "component_type"] + required_class_vars: ClassVar[List[str]] = ["component_config_schema", "component_type"] - def __init_subclass__(cls, **kwargs: Any): + def __init_subclass__(cls, **kwargs: Any) -> None: super().__init_subclass__(**kwargs) # TODO: validate provider is loadable diff --git a/python/packages/autogen-core/src/autogen_core/models/_types.py b/python/packages/autogen-core/src/autogen_core/models/_types.py index fb118562e4d3..a3d6af1edde4 100644 --- a/python/packages/autogen-core/src/autogen_core/models/_types.py +++ b/python/packages/autogen-core/src/autogen_core/models/_types.py @@ -52,7 +52,7 @@ class RequestUsage: completion_tokens: int -FinishReasons = Literal["stop", "length", "function_calls", "content_filter"] +FinishReasons = Literal["stop", "length", "function_calls", "content_filter", "unknown"] @dataclass diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index befb83746063..b525e6340fd0 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ 
b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -30,6 +30,7 @@ Image, MessageHandlerContext, ) +from autogen_core.models import FinishReasons from autogen_core.logging import LLMCallEvent from autogen_core.models import ( AssistantMessage, @@ -327,6 +328,21 @@ def assert_valid_name(name: str) -> str: return name +def normalize_stop_reason(stop_reason: str | None) -> FinishReasons: + if stop_reason is None: + return "unknown" + + # Convert to lower case + stop_reason = stop_reason.lower() + + KNOWN_STOP_MAPPINGS: Dict[str, FinishReasons] = { + "end_turn": "stop", + "tool_calls": "function_calls", + } + + return KNOWN_STOP_MAPPINGS.get(stop_reason, "unknown") + + class BaseOpenAIChatCompletionClient(ChatCompletionClient): def __init__( self, @@ -747,8 +763,8 @@ async def create_stream( else: prompt_tokens = 0 - if stop_reason is None: - raise ValueError("No stop reason found") + if stop_reason == "function_call": + raise ValueError("Function calls are not supported in this context") content: Union[str, List[FunctionCall]] if len(content_deltas) > 1: @@ -770,13 +786,9 @@ async def create_stream( prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, ) - if stop_reason == "function_call": - raise ValueError("Function calls are not supported in this context") - if stop_reason == "tool_calls": - stop_reason = "function_calls" result = CreateResult( - finish_reason=stop_reason, # type: ignore + finish_reason=normalize_stop_reason(stop_reason), content=content, usage=usage, cached=False, diff --git a/python/packages/autogen-ext/src/autogen_ext/py.typed b/python/packages/autogen-ext/src/autogen_ext/py.typed new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py b/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py index 23aca97014c3..fc2e4f6b9129 100644 --- a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py +++ 
b/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py @@ -1,9 +1,10 @@ import warnings -from typing import List +from typing import Awaitable, Callable, List, Optional, Union from autogen_agentchat.agents import CodeExecutorAgent, UserProxyAgent from autogen_agentchat.base import ChatAgent from autogen_agentchat.teams import MagenticOneGroupChat +from autogen_core import CancellationToken from autogen_core.models import ChatCompletionClient from autogen_ext.agents.file_surfer import FileSurfer @@ -12,6 +13,10 @@ from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor from autogen_ext.models.openai._openai_client import BaseOpenAIChatCompletionClient +SyncInputFunc = Callable[[str], str] +AsyncInputFunc = Callable[[str, Optional[CancellationToken]], Awaitable[str]] +InputFuncType = Union[SyncInputFunc, AsyncInputFunc] + class MagenticOne(MagenticOneGroupChat): """ @@ -116,7 +121,12 @@ async def example_usage_hil(): """ - def __init__(self, client: ChatCompletionClient, hil_mode: bool = False): + def __init__( + self, + client: ChatCompletionClient, + hil_mode: bool = False, + input_func: InputFuncType | None = None, + ): self.client = client self._validate_client_capabilities(client) @@ -126,7 +136,7 @@ def __init__(self, client: ChatCompletionClient, hil_mode: bool = False): executor = CodeExecutorAgent("Executor", code_executor=LocalCommandLineCodeExecutor()) agents: List[ChatAgent] = [fs, ws, coder, executor] if hil_mode: - user_proxy = UserProxyAgent("User") + user_proxy = UserProxyAgent("User", input_func=input_func) agents.append(user_proxy) super().__init__(agents, model_client=client) diff --git a/python/packages/autogen-studio/README.md b/python/packages/autogen-studio/README.md index 007210350829..e75d6d3d4309 100644 --- a/python/packages/autogen-studio/README.md +++ b/python/packages/autogen-studio/README.md @@ -1,9 +1,9 @@ # AutoGen Studio [![PyPI 
version](https://badge.fury.io/py/autogenstudio.svg)](https://badge.fury.io/py/autogenstudio) -[![Downloads](https://static.pepy.tech/badge/autogenstudio/week)](https://pepy.tech/project/autogenstudio) +![PyPI - Downloads](https://img.shields.io/pypi/dm/autogenstudio) -![ARA](./docs/ags_screen.png) +![ARA](https://media.githubusercontent.com/media/microsoft/autogen/refs/heads/main/python/packages/autogen-studio/docs/ags_screen.png) AutoGen Studio is an AutoGen-powered AI app (user interface) to help you rapidly prototype AI agents, enhance them with skills, compose them into workflows and interact with them to accomplish tasks. It is built on top of the [AutoGen](https://microsoft.github.io/autogen) framework, which is a toolkit for building AI agents. diff --git a/python/packages/autogen-studio/docs/ags_screen.png b/python/packages/autogen-studio/docs/ags_screen.png index 017b69aac25d..3cafcb18b933 100644 --- a/python/packages/autogen-studio/docs/ags_screen.png +++ b/python/packages/autogen-studio/docs/ags_screen.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54473a4fbfcded2b3e008b448c00117e801462cc7687b0bc14a1c22c92dbdb97 -size 621469 +oid sha256:876389d20f68c9c6e230563a145f8e10c6870bf8633163f0a6fe1f5db8d8ffe8 +size 195570 diff --git a/python/packages/autogen-studio/pyproject.toml b/python/packages/autogen-studio/pyproject.toml index 5fa6676198a6..5c9cbcc6cf47 100644 --- a/python/packages/autogen-studio/pyproject.toml +++ b/python/packages/autogen-studio/pyproject.toml @@ -35,7 +35,8 @@ dependencies = [ "pyyaml", "autogen-core==0.4.0", "autogen-agentchat==0.4.0", - "autogen-ext[magentic-one]==0.4.0" + "autogen-ext[magentic-one, openai, azure]==0.4.0", + "azure-identity" ] optional-dependencies = {web = ["fastapi", "uvicorn"], database = ["psycopg"]} diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py index e5a07b164939..e7a3f2ed1e89 100644 --- 
a/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py +++ b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py @@ -1,8 +1,11 @@ import argparse import asyncio import warnings +from typing import Optional -from autogen_agentchat.ui import Console +from aioconsole import ainput # type: ignore +from autogen_agentchat.ui import Console, UserInputManager +from autogen_core import CancellationToken from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_ext.teams.magentic_one import MagenticOne @@ -10,6 +13,13 @@ warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) +async def cancellable_input(prompt: str, cancellation_token: Optional[CancellationToken]) -> str: + task: asyncio.Task[str] = asyncio.create_task(ainput(prompt)) # type: ignore + if cancellation_token is not None: + cancellation_token.link_future(task) + return await task + + def main() -> None: """ Command-line interface for running a complex task using MagenticOne. 
@@ -37,9 +47,10 @@ def main() -> None: args = parser.parse_args() async def run_task(task: str, hil_mode: bool) -> None: + input_manager = UserInputManager(callback=cancellable_input) client = OpenAIChatCompletionClient(model="gpt-4o") - m1 = MagenticOne(client=client, hil_mode=hil_mode) - await Console(m1.run_stream(task=task), output_stats=False) + m1 = MagenticOne(client=client, hil_mode=hil_mode, input_func=input_manager.get_wrapped_callback()) + await Console(m1.run_stream(task=task), output_stats=False, user_input_manager=input_manager) task = args.task[0] asyncio.run(run_task(task, not args.no_hil)) diff --git a/python/uv.lock b/python/uv.lock index 219b5176d2d1..156a298cc262 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -659,7 +659,8 @@ dependencies = [ { name = "alembic" }, { name = "autogen-agentchat" }, { name = "autogen-core" }, - { name = "autogen-ext", extra = ["magentic-one"] }, + { name = "autogen-ext", extra = ["azure", "magentic-one", "openai"] }, + { name = "azure-identity" }, { name = "fastapi" }, { name = "loguru" }, { name = "numpy" }, @@ -689,7 +690,8 @@ requires-dist = [ { name = "alembic" }, { name = "autogen-agentchat", editable = "packages/autogen-agentchat" }, { name = "autogen-core", editable = "packages/autogen-core" }, - { name = "autogen-ext", extras = ["magentic-one"], editable = "packages/autogen-ext" }, + { name = "autogen-ext", extras = ["azure", "magentic-one", "openai"], editable = "packages/autogen-ext" }, + { name = "azure-identity" }, { name = "fastapi" }, { name = "fastapi", marker = "extra == 'web'" }, { name = "loguru" },