From a427b38000ea5087ba7e2b725f198b0ea7c3d34e Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Wed, 8 Jan 2025 14:48:09 -0500 Subject: [PATCH 01/23] Add stable website dir, to be updated (#4948) * Add stable * stable redirect --- .github/workflows/docs.yml | 1 + python/packages/autogen-core/docs/redirects/redirect_urls.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 876b39496ded..0afb1745ba10 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -33,6 +33,7 @@ jobs: [ # For main use the workflow target { ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13" }, + { ref: "${{github.ref}}", dest-dir: stable, uv-version: "0.5.13" }, { ref: "v0.4.0.dev0", dest-dir: "0.4.0.dev0", uv-version: "0.5.11" }, { ref: "v0.4.0.dev1", dest-dir: "0.4.0.dev1", uv-version: "0.5.11" }, { ref: "v0.4.0.dev2", dest-dir: "0.4.0.dev2", uv-version: "0.5.11" }, diff --git a/python/packages/autogen-core/docs/redirects/redirect_urls.txt b/python/packages/autogen-core/docs/redirects/redirect_urls.txt index 779023764a1c..6d1b0c9c7ce5 100644 --- a/python/packages/autogen-core/docs/redirects/redirect_urls.txt +++ b/python/packages/autogen-core/docs/redirects/redirect_urls.txt @@ -1,4 +1,4 @@ -/autogen/,/autogen/0.2/ +/autogen/,/autogen/stable/ /autogen/docs/Getting-Started,/autogen/0.2/docs/Getting-Started /autogen/docs/installation/,/autogen/0.2/docs/installation/ /autogen/docs/tutorial/introduction,/autogen/0.2/docs/tutorial/introduction From 7131dc945d93831069e20fc8b984a52305da791b Mon Sep 17 00:00:00 2001 From: afourney Date: Wed, 8 Jan 2025 14:05:08 -0800 Subject: [PATCH 02/23] Added m1 cli package (#4949) * Added m1 cli package * update CI, install card, deprecations * Update python/packages/magentic-one-cli/pyproject.toml * fix mypy and pyright * add package * Suppress 'ResourceWarning: unclosed socket' --------- Co-authored-by: Jack Gerrits --- .github/workflows/checks.yml | 6 ++- .github/workflows/single-python-package.yml | 1 + .../packages/autogen-core/docs/src/index.md | 2 +- python/packages/autogen-ext/pyproject.toml | 5 +- .../models/openai/_openai_client.py | 12 ++--- python/packages/magentic-one-cli/LICENSE-CODE | 21 +++++++++ python/packages/magentic-one-cli/README.md | 1 + .../packages/magentic-one-cli/pyproject.toml | 47 +++++++++++++++++++ .../src/magentic_one_cli/__init__.py | 0 .../src/magentic_one_cli/__main__.py | 3 ++ .../src/magentic_one_cli/_m1.py} | 5 +- .../src/magentic_one_cli/py.typed | 0 python/pyproject.toml | 3 +- python/uv.lock | 21 +++++++++ 14 files changed, 112 insertions(+), 15 deletions(-) create mode 100644 python/packages/magentic-one-cli/LICENSE-CODE create mode 100644 python/packages/magentic-one-cli/README.md create mode 100644 python/packages/magentic-one-cli/pyproject.toml create mode 100644 python/packages/magentic-one-cli/src/magentic_one_cli/__init__.py create mode 100644 python/packages/magentic-one-cli/src/magentic_one_cli/__main__.py rename python/packages/{autogen-ext/src/autogen_ext/teams/magentic_one_cli.py => magentic-one-cli/src/magentic_one_cli/_m1.py} (90%) create mode 100644 python/packages/magentic-one-cli/src/magentic_one_cli/py.typed diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 347e3f7042f0..34d71e3b2996 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -58,6 +58,7 @@ jobs: "./packages/agbench", "./packages/autogen-ext", "./packages/autogen-agentchat", + 
"./packages/magentic-one-cli", ] steps: - uses: actions/checkout@v4 @@ -86,6 +87,7 @@ jobs: "./packages/agbench", "./packages/autogen-ext", "./packages/autogen-agentchat", + "./packages/magentic-one-cli", ] steps: - uses: actions/checkout@v4 @@ -132,7 +134,7 @@ jobs: source ${{ github.workspace }}/python/.venv/bin/activate poe --directory ${{ matrix.package }} test working-directory: ./python - + codecov: runs-on: ubuntu-latest strategy: @@ -213,7 +215,7 @@ jobs: source ${{ github.workspace }}/python/.venv/bin/activate poe --directory ${{ matrix.package }} docs-check-examples working-directory: ./python - + samples-code-check: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/single-python-package.yml b/.github/workflows/single-python-package.yml index b4657a5f5163..1c73ceb35bb0 100644 --- a/.github/workflows/single-python-package.yml +++ b/.github/workflows/single-python-package.yml @@ -14,6 +14,7 @@ on: - agbench - autogen-magentic-one - autogen-studio + - magentic-one-cli ref: description: 'Tag to deploy' required: true diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 0af7d1079ee3..469f697f822f 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -60,7 +60,7 @@ A multi-agent assistant for web and file-based tasks. Built on AgentChat. ```bash -pip install magentic-one +pip install magentic-one-cli m1 "Find flights from Seattle to Paris and format the result in a table" ``` diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index a4e7d0d9f1e2..8b972e98b8b0 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ b/python/packages/autogen-ext/pyproject.toml @@ -18,14 +18,11 @@ dependencies = [ "autogen-core==0.4.0.dev13", ] -[project.scripts] -m1 = "autogen_ext.teams.magentic_one_cli:main" - [project.optional-dependencies] langchain = ["langchain_core~= 0.3.3"] azure = ["azure-core", "azure-identity"] docker = ["docker~=7.0"] -openai = ["openai>=1.52.2", "aiofiles"] +openai = ["openai>=1.52.2", "tiktoken>=0.8.0", "aiofiles"] file-surfer = [ "autogen-agentchat==0.4.0.dev13", "markitdown>=0.0.1a2", diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index 31db4974bbb0..0a811dacce83 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -409,14 +409,14 @@ async def create( # TODO: allow custom handling. 
# For now we raise an error if images are present and vision is not supported - if self.capabilities["vision"] is False: + if self.model_info["vision"] is False: for message in messages: if isinstance(message, UserMessage): if isinstance(message.content, list) and any(isinstance(x, Image) for x in message.content): raise ValueError("Model does not support vision and image was provided") if json_output is not None: - if self.capabilities["json_output"] is False and json_output is True: + if self.model_info["json_output"] is False and json_output is True: raise ValueError("Model does not support JSON output") if json_output is True: @@ -424,13 +424,13 @@ async def create( else: create_args["response_format"] = {"type": "text"} - if self.capabilities["json_output"] is False and json_output is True: + if self.model_info["json_output"] is False and json_output is True: raise ValueError("Model does not support JSON output") oai_messages_nested = [to_oai_type(m) for m in messages] oai_messages = [item for sublist in oai_messages_nested for item in sublist] - if self.capabilities["function_calling"] is False and len(tools) > 0: + if self.model_info["function_calling"] is False and len(tools) > 0: raise ValueError("Model does not support function calling") future: Union[Task[ParsedChatCompletion[BaseModel]], Task[ChatCompletion]] if len(tools) > 0: @@ -622,14 +622,14 @@ async def create_stream( # TODO: allow custom handling. # For now we raise an error if images are present and vision is not supported - if self.capabilities["vision"] is False: + if self.model_info["vision"] is False: for message in messages: if isinstance(message, UserMessage): if isinstance(message.content, list) and any(isinstance(x, Image) for x in message.content): raise ValueError("Model does not support vision and image was provided") if json_output is not None: - if self.capabilities["json_output"] is False and json_output is True: + if self.model_info["json_output"] is False and json_output is True: raise ValueError("Model does not support JSON output") if json_output is True: diff --git a/python/packages/magentic-one-cli/LICENSE-CODE b/python/packages/magentic-one-cli/LICENSE-CODE new file mode 100644 index 000000000000..9e841e7a26e4 --- /dev/null +++ b/python/packages/magentic-one-cli/LICENSE-CODE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/python/packages/magentic-one-cli/README.md b/python/packages/magentic-one-cli/README.md new file mode 100644 index 000000000000..ccc319776481 --- /dev/null +++ b/python/packages/magentic-one-cli/README.md @@ -0,0 +1 @@ +# magentic-one-cli diff --git a/python/packages/magentic-one-cli/pyproject.toml b/python/packages/magentic-one-cli/pyproject.toml new file mode 100644 index 000000000000..b5db07ed79e5 --- /dev/null +++ b/python/packages/magentic-one-cli/pyproject.toml @@ -0,0 +1,47 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "magentic-one-cli" +version = "0.1.0" +license = {file = "LICENSE-CODE"} +description = "Magentic-One is a generalist multi-agent system, built on `AutoGen-AgentChat`, for solving complex web and file-based tasks. This package installs the `m1` command-line utility to quickly get started with Magentic-One." +readme = "README.md" +requires-python = ">=3.10" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +dependencies = [ + "autogen-agentchat", #>=0.4.0<0.5 + "autogen-ext[openai,magentic-one]", #>=0.4.0<0.5 +] + +[project.scripts] +m1 = "magentic_one_cli._m1:main" + +[dependency-groups] +dev = [] + + +[tool.ruff] +extend = "../../pyproject.toml" +include = ["src/**", "tests/*.py"] + +[tool.pyright] +extends = "../../pyproject.toml" +include = ["src"] + +[tool.pytest.ini_options] +minversion = "6.0" +testpaths = ["tests"] + +[tool.poe] +include = "../../shared_tasks.toml" + +[tool.poe.tasks] +mypy = "mypy --config-file $POE_ROOT/../../pyproject.toml src" +test = "true" +coverage = "true" diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/__init__.py b/python/packages/magentic-one-cli/src/magentic_one_cli/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/__main__.py b/python/packages/magentic-one-cli/src/magentic_one_cli/__main__.py new file mode 100644 index 000000000000..44220c725c7b --- /dev/null +++ b/python/packages/magentic-one-cli/src/magentic_one_cli/__main__.py @@ -0,0 +1,3 @@ +from ._m1 import main + +main() diff --git a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one_cli.py b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py similarity index 90% rename from python/packages/autogen-ext/src/autogen_ext/teams/magentic_one_cli.py rename to python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py index 97abac5933b7..d2698d23f598 100644 --- a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one_cli.py +++ b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py @@ -1,11 +1,14 @@ import argparse import asyncio +import warnings from autogen_agentchat.ui import Console - from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_ext.teams.magentic_one import MagenticOne +# Suppress warnings about the requests.Session() not being closed +warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) + def main() -> None: """ diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/py.typed b/python/packages/magentic-one-cli/src/magentic_one_cli/py.typed 
new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/pyproject.toml b/python/pyproject.toml index f3b5c9453aad..56e7c1f61274 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -31,9 +31,10 @@ autogen-agentchat = { workspace = true } autogen-core = { workspace = true } autogen-ext = { workspace = true } autogen-magentic-one = { workspace = true } -autogenstudio = { workspace = true } autogen-test-utils = { workspace = true } +autogenstudio = { workspace = true } component-schema-gen = { workspace = true } +magentic-one-cli = { workspace = true } [tool.ruff] line-length = 120 diff --git a/python/uv.lock b/python/uv.lock index 9f8ed0980e56..330f570ef432 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -26,6 +26,7 @@ members = [ "autogen-test-utils", "autogenstudio", "component-schema-gen", + "magentic-one-cli", ] [manifest.dependency-groups] @@ -509,6 +510,7 @@ magentic-one = [ openai = [ { name = "aiofiles" }, { name = "openai" }, + { name = "tiktoken" }, ] video-surfer = [ { name = "autogen-agentchat" }, @@ -553,6 +555,7 @@ requires-dist = [ { name = "pillow", marker = "extra == 'web-surfer'", specifier = ">=11.0.0" }, { name = "playwright", marker = "extra == 'magentic-one'", specifier = ">=1.48.0" }, { name = "playwright", marker = "extra == 'web-surfer'", specifier = ">=1.48.0" }, + { name = "tiktoken", marker = "extra == 'openai'", specifier = ">=0.8.0" }, ] [package.metadata.requires-dev] @@ -2463,6 +2466,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ba/b2/6a22fb5c0885da3b00e116aee81f0b829ec9ac8f736cd414b4a09413fc7d/lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba", size = 3487557 }, ] +[[package]] +name = "magentic-one-cli" +version = "0.1.0" +source = { editable = "packages/magentic-one-cli" } +dependencies = [ + { name = "autogen-agentchat" }, + { name = "autogen-ext", extra = ["magentic-one", "openai"] }, +] + +[package.metadata] +requires-dist = [ + { name = "autogen-agentchat", editable = "packages/autogen-agentchat" }, + { name = "autogen-ext", extras = ["openai", "magentic-one"], editable = "packages/autogen-ext" }, +] + +[package.metadata.requires-dev] +dev = [] + [[package]] name = "mako" version = "1.3.6" From 318820e5ed6b98c01b6698bf9a0c5ab5d98e966e Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 8 Jan 2025 15:18:08 -0800 Subject: [PATCH 03/23] "magentic one" --> "magentic one cli" on landing page (#4951) --- python/packages/autogen-core/docs/src/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 469f697f822f..763da49b622a 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -54,9 +54,9 @@ A framework for building AI agents and applications
{fas}`book;pst-color-primary` -Magentic-One [![PyPi magentic-one](https://img.shields.io/badge/PyPi-magentic--one-blue?logo=pypi)](https://pypi.org/project/magentic-one/) +Magentic-One CLI [![PyPi magentic-one-cli](https://img.shields.io/badge/PyPi-magentic--one--cli-blue?logo=pypi)](https://pypi.org/project/magentic-one-cli/)
-A multi-agent assistant for web and file-based tasks. +A console-based multi-agent assistant for web and file-based tasks. Built on AgentChat. ```bash From 903305e810286e82ef94846f3740861e3659ec55 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 8 Jan 2025 15:24:12 -0800 Subject: [PATCH 04/23] Add tutorial index page; improve installation pages; improve Core tutorial to mention how to use AgentChat agent in Core. (#4950) --- .../user-guide/agentchat-user-guide/index.md | 9 +- .../agentchat-user-guide/installation.md | 17 +-- .../agentchat-user-guide/tutorial/index.md | 72 ++++++++++++ .../framework/agent-and-agent-runtime.ipynb | 104 ++++++++++++++---- .../core-user-guide/installation.md | 71 ++++++++++++ 5 files changed, 235 insertions(+), 38 deletions(-) create mode 100644 python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md index 46f526c6c3aa..9754bd8ca5ed 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md @@ -31,19 +31,19 @@ How to install AgentChat Build your first agent ::: -:::{grid-item-card} {fas}`graduation-cap;pst-color-primary` Tutorial -:link: ./tutorial/models.html +:::{grid-item-card} {fas}`school;pst-color-primary` Tutorial +:link: ./tutorial/index.html Step-by-step guide to using AgentChat, learn about agents, teams, and more ::: -:::{grid-item-card} {fas}`book;pst-color-primary` Selector Group Chat +:::{grid-item-card} {fas}`sitemap;pst-color-primary` Selector Group Chat :link: ./selector-group-chat.html Multi-agent coordination through a shared context and centralized, customizable selector ::: -:::{grid-item-card} {fas}`book;pst-color-primary` Swarm +:::{grid-item-card} {fas}`dove;pst-color-primary` Swarm :link: ./swarm.html Multi-agent coordination through a shared context and localized, tool-based selector @@ -82,6 +82,7 @@ migration-guide :hidden: :caption: Tutorial +tutorial/index tutorial/models tutorial/messages tutorial/agents diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md index ce6fecb0b2ff..b55fcccd54ca 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md @@ -38,7 +38,7 @@ deactivate Create and activate: ```bash -conda create -n autogen python=3.10 +conda create -n autogen python=3.12 conda activate autogen ``` @@ -77,15 +77,8 @@ extensions: pip install "autogen-ext[openai]==0.4.0.dev13" ``` -## Install Docker for Code Execution +If you are using Azure OpenAI with AAD authentication, you need to install the following: -We recommend using Docker for code execution. -To install Docker, follow the instructions for your operating system on the [Docker website](https://docs.docker.com/get-docker/). - -A simple example of how to use Docker for code execution is shown below: - - - -To learn more about agents that execute code, see the [agents tutorial](./tutorial/agents.ipynb). 
+```bash +pip install "autogen-ext[azure]==0.4.0.dev13" +``` diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md new file mode 100644 index 000000000000..abb8bc72f291 --- /dev/null +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md @@ -0,0 +1,72 @@ +--- +myst: + html_meta: + "description lang=en": | + Tutorial for AgentChat, a high-level API for AutoGen +--- + +# Introduction + +This tutorial provides a step-by-step guide to using AgentChat. +Make sure you have first followed the [installation instructions](../installation.md) +to prepare your environment. + +At any point you are stuck, feel free to ask for help on +[GitHub Discussions](https://github.com/microsoft/autogen/discussions) +or [Discord](https://aka.ms/autogen-discord). + +```{note} +If you are coming from AutoGen v0.2, please read the [migration guide](../migration-guide.md). +``` + +::::{grid} 2 2 2 2 +:gutter: 3 + +:::{grid-item-card} {fas}`brain;pst-color-primary` Models +:link: ./models.html + +How to use LLM model clients +::: + +:::{grid-item-card} {fas}`envelope;pst-color-primary` Messages +:link: ./messages.html + +Understand the message types +::: + +:::{grid-item-card} {fas}`robot;pst-color-primary` Agents +:link: ./agents.html + +Work with AgentChat agents and get started with {py:class}`~autogen_agentchat.agents.AssistantAgent` +::: + +:::{grid-item-card} {fas}`sitemap;pst-color-primary` Teams +:link: ./teams.html + +Work with teams of agents and get started with {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`. +::: + +:::{grid-item-card} {fas}`person-chalkboard;pst-color-primary` Human-in-the-Loop +:link: ./human-in-the-loop.html + +Best practices for providing feedback to a team +::: + +:::{grid-item-card} {fas}`circle-stop;pst-color-primary` Termination +:link: ./termination.html + +Control a team using termination conditions +::: + +:::{grid-item-card} {fas}`code;pst-color-primary` Custom Agents +:link: ./custom-agents.html + +Create your own agents +::: + +:::{grid-item-card} {fas}`database;pst-color-primary` Managing State +:link: ./state.html + +Save and load agents and teams for persistent sessions +::: +:::: diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb index b704944b835b..2c1b9179b7c5 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb @@ -7,9 +7,8 @@ "# Agent and Agent Runtime\n", "\n", "In this and the following section, we focus on the core concepts of AutoGen:\n", - "agents, agent runtime, messages, and communication.\n", - "You will not find any AI models or tools here, just the foundational\n", - "building blocks for building multi-agent applications.\n", + "agents, agent runtime, messages, and communication -- \n", + "the foundational building blocks for an multi-agent applications.\n", "\n", "```{note}\n", "The Core API is designed to be unopinionated and flexible. 
So at times, you\n", @@ -25,22 +24,31 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "An agent in AutoGen is an entity defined by the base class {py:class}`autogen_core.Agent`.\n", - "It has a unique identifier of the type {py:class}`autogen_core.AgentId`,\n", - "a metadata dictionary of the type {py:class}`autogen_core.AgentMetadata`,\n", + "An agent in AutoGen is an entity defined by the base interface {py:class}`~autogen_core.Agent`.\n", + "It has a unique identifier of the type {py:class}`~autogen_core.AgentId`,\n", + "a metadata dictionary of the type {py:class}`~autogen_core.AgentMetadata`.\n", "\n", - "and method for handling messages {py:meth}`autogen_core.BaseAgent.on_message_impl`. In most cases, you can subclass your agents from higher level class {py:class}`autogen_core.RoutedAgent` which enables you to route messages to corresponding message handler specified with {py:meth}`autogen_core.message_handler` decorator and proper type hint for the `message` variable.\n", + "In most cases, you can subclass your agents from higher level class {py:class}`~autogen_core.RoutedAgent` which enables you to route messages to corresponding message handler specified with {py:meth}`~autogen_core.message_handler` decorator and proper type hint for the `message` variable.\n", "An agent runtime is the execution environment for agents in AutoGen.\n", + "\n", "Similar to the runtime environment of a programming language,\n", "an agent runtime provides the necessary infrastructure to facilitate communication\n", "between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n", "debugging.\n", + "\n", "For local development, developers can use {py:class}`~autogen_core.SingleThreadedAgentRuntime`,\n", "which can be embedded in a Python application.\n", "\n", "```{note}\n", "Agents are not directly instantiated and managed by application code.\n", "Instead, they are created by the runtime when needed and managed by the runtime.\n", + "\n", + "If you are already familiar with [AgentChat](../../agentchat-user-guide/index.md),\n", + "it is important to note that AgentChat's agents such as\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` are created by application \n", + "and thus not directly managed by the runtime. To use an AgentChat agent in Core,\n", + "you need to create a wrapper Core agent that delegates messages to the AgentChat agent\n", + "and let the runtime manage the wrapper agent.\n", "```" ] }, @@ -59,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -79,7 +87,7 @@ "\n", " @message_handler\n", " async def handle_my_message_type(self, message: MyMessageType, ctx: MessageContext) -> None:\n", - " print(f\"Received message: {message.content}\") # type: ignore" + " print(f\"{self.id.type} received message: {message.content}\")" ] }, { @@ -90,6 +98,55 @@ "See the next section on [message and communication](./message-and-communication.ipynb)." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using an AgentChat Agent\n", + "\n", + "If you have an [AgentChat](../../agentchat-user-guide/index.md) agent and want to use it in the Core API, you can create\n", + "a wrapper {py:class}`~autogen_core.RoutedAgent` that delegates messages to the AgentChat agent.\n", + "The following example shows how to create a wrapper agent for the {py:class}`~autogen_agentchat.agents.AssistantAgent`\n", + "in AgentChat." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import TextMessage\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "\n", + "class MyAssistant(RoutedAgent):\n", + " def __init__(self, name: str) -> None:\n", + " super().__init__(name)\n", + " model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + " self._delegate = AssistantAgent(name, model_client=model_client)\n", + "\n", + " @message_handler\n", + " async def handle_my_message_type(self, message: MyMessageType, ctx: MessageContext) -> None:\n", + " print(f\"{self.id.type} received message: {message.content}\")\n", + " response = await self._delegate.on_messages(\n", + " [TextMessage(content=message.content, source=\"user\")], ctx.cancellation_token\n", + " )\n", + " print(f\"{self.id.type} responded: {response.chat_message.content}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For how to use model client, see the [Model Client](./model-clients.ipynb) section.\n", + "\n", + "Since the Core API is unopinionated,\n", + "you are not required to use the AgentChat API to use the Core API.\n", + "You can implement your own agents or use another agent framework." + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -106,7 +163,7 @@ "when they are needed.\n", "\n", "Agent type ({py:class}`~autogen_core.AgentType`) is not the same as the agent class. In this example,\n", - "the agent type is `AgentType(\"my_agent\")` and the agent class is the Python class `MyAgent`.\n", + "the agent type is `AgentType(\"my_agent\")` or `AgentType(\"my_assistant\")` and the agent class is the Python class `MyAgent` or `MyAssistantAgent`.\n", "The factory function is expected to return an instance of the agent class \n", "on which the {py:meth}`~autogen_core.BaseAgent.register` class method is invoked.\n", "Read [Agent Identity and Lifecycles](../core-concepts/agent-identity-and-lifecycle.md)\n", @@ -119,23 +176,23 @@ "can be used to create different instances of the same agent class.\n", "```\n", "\n", - "To register an agent type with the \n", + "To register our agent types with the \n", "{py:class}`~autogen_core.SingleThreadedAgentRuntime`,\n", "the following code can be used:" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AgentType(type='my_agent')" + "AgentType(type='my_assistant')" ] }, - "execution_count": 2, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -144,7 +201,8 @@ "from autogen_core import SingleThreadedAgentRuntime\n", "\n", "runtime = SingleThreadedAgentRuntime()\n", - "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())" + "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())\n", + "await MyAssistant.register(runtime, \"my_assistant\", lambda: MyAssistant(\"my_assistant\"))" ] }, { @@ -159,21 +217,23 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Received message: Hello, World!\n" + "my_agent received message: Hello, World!\n", + "my_assistant received message: Hello, World!\n", + "my_assistant responded: Hello! 
How can I assist you today?\n" ] } ], "source": [ - "agent_id = AgentId(\"my_agent\", \"default\")\n", "runtime.start() # Start processing messages in the background.\n", - "await runtime.send_message(MyMessageType(\"Hello, World!\"), agent_id)\n", + "await runtime.send_message(MyMessageType(\"Hello, World!\"), AgentId(\"my_agent\", \"default\"))\n", + "await runtime.send_message(MyMessageType(\"Hello, World!\"), AgentId(\"my_assistant\", \"default\"))\n", "await runtime.stop() # Stop processing messages in the background." ] }, @@ -203,7 +263,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -228,7 +288,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -246,7 +306,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md index ff468a1775db..8b7d0dc2dfeb 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md @@ -1,5 +1,53 @@ # Installation +## Create a Virtual Environment (optional) + +When installing AgentChat locally, we recommend using a virtual environment for the installation. This will ensure that the dependencies for AgentChat are isolated from the rest of your system. + +``````{tab-set} + +`````{tab-item} venv + +Create and activate: + +```bash +python3 -m venv .venv +source .venv/bin/activate +``` + +To deactivate later, run: + +```bash +deactivate +``` + +````` + +`````{tab-item} conda + +[Install Conda](https://docs.conda.io/projects/conda/en/stable/user-guide/install/index.html) if you have not already. + + +Create and activate: + +```bash +conda create -n autogen python=3.12 +conda activate autogen +``` + +To deactivate later, run: + +```bash +conda deactivate +``` + + +````` + + + +`````` + ## Install using pip Install the `autogen-core` package using pip: @@ -12,3 +60,26 @@ pip install "autogen-core==0.4.0.dev13" ```{note} Python 3.10 or later is required. ``` + +## Install OpenAI for Model Client + +To use the OpenAI and Azure OpenAI models, you need to install the following +extensions: + +```bash +pip install "autogen-ext[openai]==0.4.0.dev13" +``` + +If you are using Azure OpenAI with AAD authentication, you need to install the following: + +```bash +pip install "autogen-ext[azure]==0.4.0.dev13" +``` + +## Install Docker for Code Execution (Optional) + +We recommend using Docker to use {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` for execution of model-generated code. +To install Docker, follow the instructions for your operating system on the [Docker website](https://docs.docker.com/get-docker/). + +To learn more code execution, see [Command Line Code Executors](./framework/command-line-code-executors.ipynb) +and [Code Execution](./design-patterns/code-execution-groupchat.ipynb). 
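As a quick illustration of the Docker-based execution that the new installation page above links to, the following is a minimal sketch (not part of any patch in this series) of driving `DockerCommandLineCodeExecutor` directly. It assumes the `autogen_ext.code_executors.docker` module path named on that page, the `CodeBlock` and `CancellationToken` types from `autogen-core` in the 0.4 dev series, and a running Docker daemon:

```python
import asyncio

from autogen_core import CancellationToken
from autogen_core.code_executor import CodeBlock
from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor


async def main() -> None:
    # Start a container-backed executor (pulls the default Python image if needed).
    executor = DockerCommandLineCodeExecutor()
    await executor.start()
    try:
        # Run a model-generated (here: hard-coded) code block inside the container.
        result = await executor.execute_code_blocks(
            [CodeBlock(language="python", code="print('Hello from Docker')")],
            cancellation_token=CancellationToken(),
        )
        print(result.exit_code, result.output)
    finally:
        await executor.stop()  # Stop and remove the container.


asyncio.run(main())
```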
From 79b0b6d0588fc3e1a4390e28dab45c797e51847f Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 09:35:41 -0500 Subject: [PATCH 05/23] Override linguist file classifications (#4952) --- .gitattributes | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.gitattributes b/.gitattributes index 877d0a1fb12e..eb4fe7fb047f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -87,3 +87,12 @@ makefile text eol=lf *.jpeg filter=lfs diff=lfs merge=lfs -text python/packages/autogen-magentic-one/imgs/autogen-magentic-one-example.png filter=lfs diff=lfs merge=lfs -text python/packages/autogen-magentic-one/imgs/autogen-magentic-one-agents.png filter=lfs diff=lfs merge=lfs -text + +python/packages/autogen-magentic-one/tests/browser_utils/test_files/test_blog.html linguist-vendored +python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/*.py linguist-generated +python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/*.pyi linguist-generated +python/packages/autogen-ext/tests/protos/*.py linguist-generated +python/packages/autogen-ext/tests/protos/*.pyi linguist-generated +docs/** linguist-documentation +python/packages/autogen-core/docs/** linguist-documentation +dotnet/website/** linguist-documentation From 02ad98bcb35b5fc84eb5462f9bff419904d7734b Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 11:02:15 -0500 Subject: [PATCH 06/23] Console async printing and optional stats (#4956) * async printing * Make stats output option --- .../src/autogen_agentchat/ui/_console.py | 61 ++++++++++--------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 6315b504977c..79d39d6add7f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -3,6 +3,7 @@ import time from typing import AsyncGenerator, List, Optional, TypeVar, cast +from aioconsole import aprint # type: ignore from autogen_core import Image from autogen_core.models import RequestUsage @@ -25,6 +26,7 @@ async def Console( stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], *, no_inline_images: bool = False, + output_stats: bool = True, ) -> T: """ Consumes the message stream from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` @@ -35,6 +37,7 @@ async def Console( stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render. This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False. + output_stats (bool, optional): If True, will output a summary of the messages and inline token usage info. Defaults to True. 
Returns: last_processed: A :class:`~autogen_agentchat.base.TaskResult` if the stream is from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` @@ -49,16 +52,16 @@ async def Console( async for message in stream: if isinstance(message, TaskResult): duration = time.time() - start_time - output = ( - f"{'-' * 10} Summary {'-' * 10}\n" - f"Number of messages: {len(message.messages)}\n" - f"Finish reason: {message.stop_reason}\n" - f"Total prompt tokens: {total_usage.prompt_tokens}\n" - f"Total completion tokens: {total_usage.completion_tokens}\n" - f"Duration: {duration:.2f} seconds\n" - ) - sys.stdout.write(output) - sys.stdout.flush() + if output_stats: + output = ( + f"{'-' * 10} Summary {'-' * 10}\n" + f"Number of messages: {len(message.messages)}\n" + f"Finish reason: {message.stop_reason}\n" + f"Total prompt tokens: {total_usage.prompt_tokens}\n" + f"Total completion tokens: {total_usage.completion_tokens}\n" + f"Duration: {duration:.2f} seconds\n" + ) + await aprint(output, end="") # mypy ignore last_processed = message # type: ignore @@ -68,26 +71,26 @@ async def Console( # Print final response. output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{_message_to_str(message.chat_message, render_image_iterm=render_image_iterm)}\n" if message.chat_message.models_usage: - output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n" + if output_stats: + output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n" total_usage.completion_tokens += message.chat_message.models_usage.completion_tokens total_usage.prompt_tokens += message.chat_message.models_usage.prompt_tokens - sys.stdout.write(output) - sys.stdout.flush() + await aprint(output, end="") # Print summary. 
- if message.inner_messages is not None: - num_inner_messages = len(message.inner_messages) - else: - num_inner_messages = 0 - output = ( - f"{'-' * 10} Summary {'-' * 10}\n" - f"Number of inner messages: {num_inner_messages}\n" - f"Total prompt tokens: {total_usage.prompt_tokens}\n" - f"Total completion tokens: {total_usage.completion_tokens}\n" - f"Duration: {duration:.2f} seconds\n" - ) - sys.stdout.write(output) - sys.stdout.flush() + if output_stats: + if message.inner_messages is not None: + num_inner_messages = len(message.inner_messages) + else: + num_inner_messages = 0 + output = ( + f"{'-' * 10} Summary {'-' * 10}\n" + f"Number of inner messages: {num_inner_messages}\n" + f"Total prompt tokens: {total_usage.prompt_tokens}\n" + f"Total completion tokens: {total_usage.completion_tokens}\n" + f"Duration: {duration:.2f} seconds\n" + ) + await aprint(output, end="") # mypy ignore last_processed = message # type: ignore @@ -96,11 +99,11 @@ async def Console( message = cast(AgentEvent | ChatMessage, message) # type: ignore output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message, render_image_iterm=render_image_iterm)}\n" if message.models_usage: - output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n" + if output_stats: + output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n" total_usage.completion_tokens += message.models_usage.completion_tokens total_usage.prompt_tokens += message.models_usage.prompt_tokens - sys.stdout.write(output) - sys.stdout.flush() + await aprint(output, end="") if last_processed is None: raise ValueError("No TaskResult or Response was processed.") From 3d6d661f7eeb72c9cbd5c01bd4e8fcad0245d7d3 Mon Sep 17 00:00:00 2001 From: gagb Date: Thu, 9 Jan 2025 10:04:38 -0800 Subject: [PATCH 07/23] Simplify README (#4712) * Simplify README * Update README with improved badge links and section titles * Enhance README with additional AutoGen Studio links and badges * Update README to change autogenstudio badge color to purple * Update README with example of AI agents collaboratively writing a poem * Add Examples section to README with link to examples * Add asyncio import to Minimal Python Example in README * Update README with example of multi-agent system for plotting stock prices * Add Quick Start section to README with installation instructions * Update README to reflect upcoming features with placeholders for installation, quickstart, tutorial, API reference, and packages * Update Tutorial link in README to include additional resource * Update installation link in README to point to the correct user guide * Add landing image to README and enhance visual appeal * Update installation link in README for Autogen Studio user guide * Update README.md Co-authored-by: Jack Gerrits * Update README.md Co-authored-by: Jack Gerrits * Update Studio link in README to point to the correct GitHub directory * Update README.md Co-authored-by: Jack Gerrits * Add migration guide reference for upgrading from AutoGen v0.2 in README * Fix Studio link in README to point to the correct directory * Update README to include links for Core API, AgentChat API, and Extensions API * Update README.md Co-authored-by: Eric Zhu * Update README.md Co-authored-by: Eric Zhu * Fix AutoGen Studio link in README for accurate navigation * Replace PyPi badges with a Documentation badge in README * Update README.md Co-authored-by: Eric Zhu * 
Update README.md Co-authored-by: Eric Zhu * Update README.md Co-authored-by: Eric Zhu * Improve README.md: clarify installation instructions, enhance descriptions of AutoGen features, and format content for better readability. * Update README.md: add AutoGen Bench section for benchmarking agent performance * Update README.md: clarify AutoGen framework description and add developer tools section * Update README.md: enhance AutoGen framework description and clarify cross-language support * Update README.md: clarify AgentChat API description and its relation to Core API * Update README.md: refine descriptions of AutoGen framework and ecosystem, enhancing clarity and readability * Update README.md: rename "Quick Start" section to "Installation" and enhance developer tools descriptions * Update readme * Update example * Update quickstart --------- Co-authored-by: Jack Gerrits Co-authored-by: Eric Zhu --- FAQ.md | 92 +++++++++++ README.md | 380 ++++++++------------------------------------ autogen-landing.jpg | 3 + 3 files changed, 162 insertions(+), 313 deletions(-) create mode 100644 FAQ.md create mode 100644 autogen-landing.jpg diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 000000000000..fdc0f959428a --- /dev/null +++ b/FAQ.md @@ -0,0 +1,92 @@ +## AutoGen FAQs + +### What is AutoGen 0.4? + +AutoGen v0.4 is a rewrite of AutoGen from the ground up to create a more robust, +scalable, easier to use, cross-language library for building AI Agents. +Some key features include asynchronous messaging, support for scalable distributed agents, +modular extensible design (bring your own agents, implement behaviors however you like), +cross-language support, improved observability, and full typing integration. +It is a breaking change. + +### Why these changes? + +We listened to our AutoGen users, learned from what was working, and adapted to fix what wasn't. +We brought together wide-ranging teams working on many different types of AI Agents +and collaborated to design an improved framework with a more flexible +programming model and better scalability. + +### Is this project still maintained? + +We want to reaffirm our commitment to supporting both the original version of AutoGen (0.2) and the redesign (0.4). AutoGen 0.4 is still work-in-progress, and we shared the code now to build with the community. There are no plans to deprecate the original AutoGen anytime soon, and both versions will be actively maintained. + +### Who should use it 0.4? + +This code is still experimental, so expect changes and bugs while we work towards a stable 0.4 release. We encourage early adopters to +try it out, give us feedback, and contribute. +For those looking for a stable version we recommend to continue using 0.2 + +### I'm using AutoGen 0.2, should I upgrade? + +If you consider yourself an early adopter, you are comfortable making some +changes to your code, and are willing to try it out, then yes. + +### How do I still use AutoGen 0.2? + +AutoGen 0.2 can be installed with: + +```sh +pip install autogen-agentchat~=0.2 +``` + +### Will AutoGen Studio be supported in 0.4? + +Yes, this is on the [roadmap](#roadmap). +Our current plan is to enable an implementation of AutoGen Studio +on the AgentChat high level API which implements a set of agent functionalities +(agents, teams, etc). + +### How do I migrate? + +For users familiar with AutoGen, the AgentChat library in 0.4 provides similar concepts. +We are working on a migration guide. + +### Is 0.4 done? + +We are still actively developing AutoGen 0.4. 
One exciting new feature is the emergence of new SDKs for .NET. The python SDKs are further ahead at this time but our goal is to achieve parity. We aim to add additional languages in future releases. + +### What is happening next? When will this release be ready? + +We are still working on improving the documentation, samples, and enhancing the code. We are hoping to release before the end of the year when things are ready. + +### What is the history of this project? + +The rearchitecture of the framework started with multiple Microsoft teams coming together +to address the gaps and learnings from AutoGen 0.2 - merging ideas from several predecessor projects. +The team worked on this internally for some time to ensure alignment before moving work back to the open in October 2024. + +### What is the official channel for support? + +Use GitHub [Issues](https://github.com/microsoft/autogen/issues) for bug reports and feature requests. +Use GitHub [Discussions](https://github.com/microsoft/autogen/discussions) for general questions and discussions. + +### Do you use Discord for communications? + +We are unable to use the old Discord for project discussions, many of the maintainers no longer have viewing or posting rights there. Therefore, we request that all discussions take place on or the [new discord server](https://aka.ms/autogen-discord). + +### What about forks? + + remains the only official repo for development and support of AutoGen. +We are aware that there are thousands of forks of AutoGen, including many for personal development and startups building with or on top of the library. We are not involved with any of these forks and are not aware of any plans related to them. + +### What is the status of the license and open source? + +Our project remains fully open-source and accessible to everyone. We understand that some forks use different licenses to align with different interests. We will continue to use the most permissive license (MIT) for the project. + +### Can you clarify the current state of the packages? + +Currently, we are unable to make releases to the `pyautogen` package via Pypi due to a change to package ownership that was done without our involvement. Additionally, we are moving to using multiple packages to align with the new design. Please see details [here](https://microsoft.github.io/autogen/dev/packages/index.html). + +### Can I still be involved? + +We are grateful to all the contributors to AutoGen 0.2 and we look forward to continuing to collaborate with everyone in the AutoGen community. diff --git a/README.md b/README.md index 883dff1ccea4..f2911f4950ec 100644 --- a/README.md +++ b/README.md @@ -3,361 +3,115 @@
AutoGen Logo -[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40pyautogen)](https://twitter.com/pyautogen) [![LinkedIn](https://img.shields.io/badge/LinkedIn-Company?style=flat&logo=linkedin&logoColor=white)](https://www.linkedin.com/company/105812540) [![Discord](https://img.shields.io/badge/discord-chat-green?logo=discord)](https://aka.ms/autogen-discord) [![GitHub Discussions](https://img.shields.io/badge/Discussions-Q%26A-green?logo=github)](https://github.com/microsoft/autogen/discussions) [![0.2 Docs](https://img.shields.io/badge/Docs-0.2-blue)](https://microsoft.github.io/autogen/0.2/) [![0.4 Docs](https://img.shields.io/badge/Docs-0.4-blue)](https://microsoft.github.io/autogen/dev/) - -[![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev13/) [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev13/) [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev13/) +[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40pyautogen)](https://twitter.com/pyautogen) +[![LinkedIn](https://img.shields.io/badge/LinkedIn-Company?style=flat&logo=linkedin&logoColor=white)](https://www.linkedin.com/company/105812540) +[![Discord](https://img.shields.io/badge/discord-chat-green?logo=discord)](https://aka.ms/autogen-discord) +[![Documentation](https://img.shields.io/badge/Documentation-AutoGen-blue?logo=read-the-docs)](https://microsoft.github.io/autogen/)
# AutoGen -> [!IMPORTANT] -> -> - (12/19/24) Hello! -The majority of the AutoGen Team members will be resting and recharging with family and friends over the holiday period. Activity/responses on the project may be delayed during the period of Dec 20-Jan 06. We will be excited to engage with you in the new year! -> - (12/11/24) We have created a new Discord server for the AutoGen community. Join us at [aka.ms/autogen-discord](https://aka.ms/autogen-discord). -> - (11/14/24) ⚠️ In response to a number of asks to clarify and distinguish between official AutoGen and its forks that created confusion, we issued a [clarification statement](https://github.com/microsoft/autogen/discussions/4217). -> - (10/13/24) Interested in the standard AutoGen as a prior user? Find it at the actively-maintained *AutoGen* [0.2 branch](https://github.com/microsoft/autogen/tree/0.2) and `autogen-agentchat~=0.2` PyPi package. -> - (10/02/24) [AutoGen 0.4](https://microsoft.github.io/autogen/dev) is a from-the-ground-up rewrite of AutoGen. Learn more about the history, goals and future at [this blog post](https://microsoft.github.io/autogen/blog). We’re excited to work with the community to gather feedback, refine, and improve the project before we officially release 0.4. This is a big change, so AutoGen 0.2 is still available, maintained, and developed in the [0.2 branch](https://github.com/microsoft/autogen/tree/0.2). -> - *[Join us for Community Office Hours](https://github.com/microsoft/autogen/discussions/4059)* We will host a weekly open discussion to answer questions, talk about Roadmap, etc. - -AutoGen is an open-source framework for building AI agent systems. -It simplifies the creation of event-driven, distributed, scalable, and resilient agentic applications. -It allows you to quickly build systems where AI agents collaborate and perform tasks autonomously -or with human oversight. - -- [Key Features](#key-features) -- [API Layering](#api-layering) -- [Quickstart](#quickstart) -- [Roadmap](#roadmap) -- [FAQs](#faqs) - -AutoGen streamlines AI development and research, enabling the use of multiple large language models (LLMs), integrated tools, and advanced multi-agent design patterns. You can develop and test your agent systems locally, then deploy to a distributed cloud environment as your needs grow. - -## Key Features - -AutoGen offers the following key features: - -- **Asynchronous Messaging**: Agents communicate via asynchronous messages, supporting both event-driven and request/response interaction patterns. -- **Full type support**: use types in all interfaces and enforced type check on build, with a focus on quality and cohesiveness -- **Scalable & Distributed**: Design complex, distributed agent networks that can operate across organizational boundaries. -- **Modular & Extensible**: Customize your system with pluggable components: custom agents, tools, memory, and models. -- **Cross-Language Support**: Interoperate agents across different programming languages. Currently supports Python and .NET, with more languages coming soon. -- **Observability & Debugging**: Built-in features and tools for tracking, tracing, and debugging agent interactions and workflows, including support for industry standard observability with OpenTelemetry - -
- - ↑ Back to Top ↑ - -
- -# API Layering - -AutoGen has several packages and is built upon a layered architecture. -Currently, there are three main APIs your application can target: - -- [Core](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/index.html) -- [AgentChat](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/index.html) -- [Extensions](https://microsoft.github.io/autogen/dev/user-guide/extensions-user-guide/index.html) - -## Core - -- [Installation](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/installation.html) -- [Quickstart](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/quickstart.html) - -The core API of AutoGen, `autogen-core`, is built following the -[actor model](https://en.wikipedia.org/wiki/Actor_model). -It supports asynchronous message passing between agents and event-based workflows. -Agents in the core layer handle and produce typed messages, using either direct messaging, -which functions like RPC, or via broadcasting to topics, which is pub-sub. -Agents can be distributed and implemented in different programming languages, -while still communicating with one another. -**Start here if you are building scalable, event-driven agentic systems.** - -## AgentChat - -- [Installation](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/installation.html) -- [Quickstart](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/quickstart.html) - -The AgentChat API, `autogen-agentchat`, is task driven and at a high level like AutoGen 0.2. -It allows you to define conversational agents, compose them into teams and then -use them to solve tasks. -AgentChat itself is built on the core layer, but it abstracts away much of its -low-level system concepts. -If your workflows don't fit into the AgentChat API, target core instead. -**Start here if you just want to focus on quickly getting started with multi-agents workflows.** - -## Extensions - -The extension package `autogen-ext` contains implementations of the core interfaces using 3rd party systems, -such as OpenAI model client and Azure code executors. -Besides the built-in extensions, the package accommodates community-contributed -extensions through namespace sub-packages. -We look forward to your contributions! +**AutoGen** is a framework for creating multi-agent AI applications that can act autonomously or work alongside humans. -
- - ↑ Back to Top ↑ - -
- -## Quickstart - -### Python (AgentChat) - -First install the packages: +## Installation ```bash -pip install "autogen-agentchat==0.4.0.dev13" "autogen-ext[openai]==0.4.0.dev13" +# Install AgentChat and OpenAI client from Extensions +pip install "autogen-agentchat" "autogen-ext[openai]" ``` -The following code uses OpenAI's GPT-4o model and you need to provide your -API key to run. -To use Azure OpenAI models, follow the instruction -[here](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.html). +The current stable version is v0.4. If you are upgrading from AutoGen v0.2, please refer to the [Migration Guide](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/migration-guide.html) for detailed instructions on how to update your code and configurations. + +### Hello World + +Create an assistant agent using OpenAI's GPT-4o model. ```python import asyncio from autogen_agentchat.agents import AssistantAgent -from autogen_agentchat.ui import Console -from autogen_agentchat.conditions import TextMentionTermination -from autogen_agentchat.teams import RoundRobinGroupChat from autogen_ext.models.openai import OpenAIChatCompletionClient -# Define a tool -async def get_weather(city: str) -> str: - return f"The weather in {city} is 73 degrees and Sunny." - async def main() -> None: - # Define an agent - weather_agent = AssistantAgent( - name="weather_agent", - model_client=OpenAIChatCompletionClient( - model="gpt-4o-2024-08-06", - # api_key="YOUR_API_KEY", - ), - tools=[get_weather], - ) - - # Define termination condition - termination = TextMentionTermination("TERMINATE") - - # Define a team - agent_team = RoundRobinGroupChat([weather_agent], termination_condition=termination) - - # Run the team and stream messages to the console - stream = agent_team.run_stream(task="What is the weather in New York?") - await Console(stream) + agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o")) + print(agent.run(task="Say 'Hello World!'")) asyncio.run(main()) ``` -### C\# - -The .NET SDK does not yet support all of the interfaces that the python SDK offers but we are working on bringing them to parity. -To use the .NET SDK, you need to add a package reference to the src in your project. -We will release nuget packages soon and will update these instructions when that happens. 
- -``` -git clone https://github.com/microsoft/autogen.git -cd autogen -# Switch to the branch that has this code -git switch staging-dev -# Build the project -cd dotnet && dotnet build AutoGen.sln -# In your source code, add AutoGen to your project -dotnet add reference /dotnet/src/Microsoft.AutoGen/Core/Microsoft.AutoGen.Core.csproj -``` - -Then, define and run your first agent: - -```csharp -using Microsoft.AutoGen.Contracts; -using Microsoft.AutoGen.Core; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Hosting; - -// send a message to the agent -var app = await App.PublishMessageAsync("HelloAgents", new NewMessageReceived -{ - Message = "World" -}, local: true); - -await App.RuntimeApp!.WaitForShutdownAsync(); -await app.WaitForShutdownAsync(); - -[TopicSubscription("agents")] -public class HelloAgent( - IAgentContext worker, - [FromKeyedServices("EventTypes")] EventTypes typeRegistry) : ConsoleAgent( - worker, - typeRegistry), - ISayHello, - IHandle, - IHandle -{ - public async Task Handle(NewMessageReceived item) - { - var response = await SayHello(item.Message).ConfigureAwait(false); - var evt = new Output - { - Message = response - }.ToCloudEvent(this.AgentId.Key); - await PublishEventAsync(evt).ConfigureAwait(false); - var goodbye = new ConversationClosed - { - UserId = this.AgentId.Key, - UserMessage = "Goodbye" - }.ToCloudEvent(this.AgentId.Key); - await PublishEventAsync(goodbye).ConfigureAwait(false); - } - public async Task Handle(ConversationClosed item) - { - var goodbye = $"********************* {item.UserId} said {item.UserMessage} ************************"; - var evt = new Output - { - Message = goodbye - }.ToCloudEvent(this.AgentId.Key); - await PublishEventAsync(evt).ConfigureAwait(false); - await Task.Delay(60000); - await App.ShutdownAsync(); - } - public async Task SayHello(string ask) - { - var response = $"\n\n\n\n***************Hello {ask}**********************\n\n\n\n"; - return response; - } -} -public interface ISayHello -{ - public Task SayHello(string ask); -} -``` - -```bash -dotnet run -``` - -
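The assistant in the Hello World example can also call plain Python functions as tools. The following is a minimal sketch of that pattern, assuming the same packages installed above; the `get_weather` function is only an illustrative stand-in for a real API call.

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def get_weather(city: str) -> str:
    # Illustrative stand-in for a real weather lookup.
    return f"The weather in {city} is 73 degrees and Sunny."


async def main() -> None:
    # Passing a coroutine function in `tools` lets the agent call it during the run;
    # `reflect_on_tool_use` asks the model to produce a final answer from the tool result.
    agent = AssistantAgent(
        "weather_agent",
        OpenAIChatCompletionClient(model="gpt-4o"),
        tools=[get_weather],
        reflect_on_tool_use=True,
    )
    result = await agent.run(task="What is the weather in New York?")
    print(result.messages[-1].content)


asyncio.run(main())
```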

- - ↑ Back to Top ↑ - -

- -## Roadmap - -- AutoGen 0.2 - This is the current stable release of AutoGen. We will continue to accept bug fixes and minor enhancements to this version. -- AutoGen 0.4 - This is the first release of the new architecture. This release is still in *preview*. We will be focusing on the stability of the interfaces, documentation, tutorials, samples, and a collection of built-in agents which you can use. We are excited to work with our community to define the future of AutoGen. We are looking for feedback and contributions to help shape the future of this project. Here are some major planned items: - - More programming languages (e.g., TypeScript) - - More built-in agents and multi-agent workflows - - Deployment of distributed agents - - Re-implementation/migration of AutoGen Studio - - Integration with other agent frameworks and data sources - - Advanced RAG techniques and memory services - -

- - ↑ Back to Top ↑ - -

- -## FAQs - -### What is AutoGen 0.4? - -AutoGen v0.4 is a rewrite of AutoGen from the ground up to create a more robust, -scalable, easier to use, cross-language library for building AI Agents. -Some key features include asynchronous messaging, support for scalable distributed agents, -modular extensible design (bring your own agents, implement behaviors however you like), -cross-language support, improved observability, and full typing integration. -It is a breaking change. - -### Why these changes? - -We listened to our AutoGen users, learned from what was working, and adapted to fix what wasn't. -We brought together wide-ranging teams working on many different types of AI Agents -and collaborated to design an improved framework with a more flexible -programming model and better scalability. +### Team -### Is this project still maintained? +Create a group chat team with an assistant agent, a web surfer agent, and a user proxy agent +for web browsing tasks. You need to install [playwright](https://playwright.dev/python/docs/library). -We want to reaffirm our commitment to supporting both the original version of AutoGen (0.2) and the redesign (0.4) . AutoGen 0.4 is still work-in-progress, and we shared the code now to build with the community. There are no plans to deprecate the original AutoGen anytime soon, and both versions will be actively maintained. - -### Who should use it 0.4? - -This code is still experimental, so expect changes and bugs while we work towards a stable 0.4 release. We encourage early adopters to -try it out, give us feedback, and contribute. -For those looking for a stable version we recommend to continue using 0.2 - -### I'm using AutoGen 0.2, should I upgrade? - -If you consider yourself an early adopter, you are comfortable making some -changes to your code, and are willing to try it out, then yes. - -### How do I still use AutoGen 0.2? +```python +# pip install autogen-agentchat autogen-ext[openai,web-surfer] +# playwright install +import asyncio +from autogen_agentchat.agents import AssistantAgent, UserProxyAgent +from autogen_agentchat.conditions import TextMentionTermination +from autogen_agentchat.teams import RoundRobinGroupChat +from autogen_agentchat.ui import Console +from autogen_ext.models.openai import OpenAIChatCompletionClient +from autogen_ext.agents.web_surfer import MultimodalWebSurfer -AutoGen 0.2 can be installed with: +async def main() -> None: + model_client = OpenAIChatCompletionClient(model="gpt-4o") + assistant = AssistantAgent("assistant", model_client) + web_surfer = MultimodalWebSurfer("web_surfer", model_client) + user_proxy = UserProxyAgent("user_proxy") + termination = TextMentionTermination("exit") # Type 'exit' to end the conversation. + team = RoundRobinGroupChat([web_surfer, assistant, user_proxy], termination_condition=termination) + await Console(team.run_stream(task="Find information about AutoGen and write a short summary.")) -```sh -pip install autogen-agentchat~=0.2 +asyncio.run(main()) ``` -### Will AutoGen Studio be supported in 0.4? - -Yes, this is on the [roadmap](#roadmap). -Our current plan is to enable an implementation of AutoGen Studio -on the AgentChat high level API which implements a set of agent functionalities -(agents, teams, etc). - -### How do I migrate? - -For users familiar with AutoGen, the AgentChat library in 0.4 provides similar concepts. -We are working on a migration guide. - -### Is 0.4 done? - -We are still actively developing AutoGen 0.4. 
One exciting new feature is the emergence of new SDKs for .NET. The python SDKs are further ahead at this time but our goal is to achieve parity. We aim to add additional languages in future releases. +## Why Use AutoGen? -### What is happening next? When will this release be ready? - -We are still working on improving the documentation, samples, and enhancing the code. We are hoping to release before the end of the year when things are ready. +
+ AutoGen Landing +
-### What is the history of this project?
+The AutoGen ecosystem provides everything you need to create AI agents, especially multi-agent workflows -- framework, developer tools, and applications.
-The rearchitecture of the framework started with multiple Microsoft teams coming together
-to address the gaps and learnings from AutoGen 0.2 - merging ideas from several predecessor projects.
-The team worked on this internally for some time to ensure alignment before moving work back to the open in October 2024.
+The *framework* uses a layered and extensible design. Layers have clearly divided responsibilities and build on top of the layers below. This design enables you to use the framework at different levels of abstraction, from high-level APIs to low-level components.
-### What is the official channel for support?
+- [Core API](./python/packages/autogen-core/) implements message passing, event-driven agents, and local and distributed runtimes for flexibility and power. It also provides cross-language support for .NET and Python.
+- [AgentChat API](./python/packages/autogen-agentchat/) implements a simpler but opinionated API for rapid prototyping. This API is built on top of the Core API, is closest to what users of v0.2 are familiar with, and supports familiar multi-agent patterns such as two-agent chat or group chats.
+- [Extensions API](./python/packages/autogen-ext/) enables first- and third-party extensions that continuously expand framework capabilities. It supports specific implementations of LLM clients (e.g., OpenAI, AzureOpenAI) and capabilities such as code execution.
+(A minimal, illustrative sketch of the Core API is included after the package table below.)
-Use GitHub [Issues](https://github.com/microsoft/autogen/issues) for bug reports and feature requests.
-Use GitHub [Discussions](https://github.com/microsoft/autogen/discussions) for general questions and discussions.
+The ecosystem also supports two essential *developer tools*:
-### Do you use Discord for communications?
+
+ AutoGen Studio Screenshot +
-We are unable to use the old Discord for project discussions, many of the maintainers no longer have viewing or posting rights there. Therefore, we request that all discussions take place on or the [new discord server](https://aka.ms/autogen-discord).
+- [AutoGen Studio](./python/packages/autogen-studio/) provides a no-code GUI for building multi-agent applications.
+- [AutoGen Bench](./python/packages/agbench/) provides a benchmarking suite for evaluating agent performance.
-### What about forks?
+You can use the AutoGen framework and developer tools to create applications for your domain. For example, [Magentic-One](./python/packages/magentic-one-cli/) is a state-of-the-art multi-agent team built using the AgentChat API and Extensions API that can handle a variety of tasks that require web browsing, code execution, and file handling.
- remains the only official repo for development and support of AutoGen.
-We are aware that there are thousands of forks of AutoGen, including many for personal development and startups building with or on top of the library. We are not involved with any of these forks and are not aware of any plans related to them.
+With AutoGen you get to join and contribute to a thriving ecosystem. We host weekly office hours and talks with maintainers and the community. We also have a [Discord server](https://aka.ms/autogen-discord) for real-time chat, GitHub Discussions for Q&A, and a blog for tutorials and updates.
-### What is the status of the license and open source?
+## Where to go next?
-Our project remains fully open-source and accessible to everyone. We understand that some forks use different licenses to align with different interests. We will continue to use the most permissive license (MIT) for the project.
+
-### Can you clarify the current state of the packages? +| | [![Python](https://img.shields.io/badge/AutoGen-Python-blue?logo=python&logoColor=white)](./python) | [![.NET](https://img.shields.io/badge/AutoGen-.NET-green?logo=.net&logoColor=white)](./dotnet) | [![Studio](https://img.shields.io/badge/AutoGen-Studio-purple?logo=visual-studio&logoColor=white)](./python/packages/autogen-studio) | +|----------------------|--------------------------------------------------------------------------------------------|-------------------|-------------------| +| Installation | [![Installation](https://img.shields.io/badge/Install-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/installation.html) | * | [![Install](https://img.shields.io/badge/Install-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/installation.html) | +| Quickstart | [![Quickstart](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/quickstart.html#) | * | * | +| Tutorial | [![Tutorial](https://img.shields.io/badge/Tutorial-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/tutorial/models.html) | *| * | +| API Reference | [![API](https://img.shields.io/badge/Docs-blue)](https://microsoft.github.io/autogen/dev/reference/index.html#) | * | [![API](https://img.shields.io/badge/Docs-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html) | +| Packages | [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/)
[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/)
[![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) | * | [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-purple?logo=pypi)](https://pypi.org/project/autogenstudio/) | -Currently, we are unable to make releases to the `pyautogen` package via Pypi due to a change to package ownership that was done without our involvement. Additionally, we are moving to using multiple packages to align with the new design. Please see details [here](https://microsoft.github.io/autogen/dev/packages/index.html). +
-### Can I still be involved? +**Releasing soon* -We are grateful to all the contributors to AutoGen 0.2 and we look forward to continuing to collaborate with everyone in the AutoGen community. +Interested in contributing? See [CONTRIBUTING.md](./CONTRIBUTING.md) for guidelines on how to get started. We welcome contributions of all kinds, including bug fixes, new features, and documentation improvements. Join our community and help us make AutoGen better! -
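As referenced earlier, here is a minimal, illustrative sketch of the Core API layer: an event-driven agent handling a typed message over the local runtime. It assumes only the `autogen-core` package as of v0.4; the `Greeting` message type and the `greeter` agent type are made-up names for this example, not part of the library.

```python
import asyncio
from dataclasses import dataclass

from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler


@dataclass
class Greeting:
    content: str


class Greeter(RoutedAgent):
    @message_handler
    async def on_greeting(self, message: Greeting, ctx: MessageContext) -> Greeting:
        # Direct (RPC-style) messaging: the returned value is delivered back to the sender.
        return Greeting(content=f"Hello, {message.content}!")


async def main() -> None:
    runtime = SingleThreadedAgentRuntime()
    # Register an agent type with a factory, then start processing messages.
    await Greeter.register(runtime, "greeter", lambda: Greeter("A greeter agent"))
    runtime.start()
    response = await runtime.send_message(Greeting("World"), AgentId("greeter", "default"))
    print(response.content)
    await runtime.stop()


asyncio.run(main())
```

The same handler-based agents can also publish and subscribe to topics for pub-sub style workflows, and can be moved from the local runtime to a distributed runtime without changing the agent code.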

- - ↑ Back to Top ↑ - -

+Have questions? Check out our [Frequently Asked Questions (FAQ)](./FAQ.md) for answers to common queries. If you don't find what you're looking for, feel free to ask in our [GitHub Discussions](https://github.com/microsoft/autogen/discussions) or join our [Discord server](https://aka.ms/autogen-discord) for real-time support. ## Legal Notices diff --git a/autogen-landing.jpg b/autogen-landing.jpg new file mode 100644 index 000000000000..c8572e4dd060 --- /dev/null +++ b/autogen-landing.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:149a1ab7bec4917c445992c0bff2d4402cb194207a03d4bec573d74d52aac5e8 +size 269405 From b07c1662b33a96af5c44dadbfc4eaa5e188e715e Mon Sep 17 00:00:00 2001 From: afourney Date: Thu, 9 Jan 2025 10:33:56 -0800 Subject: [PATCH 08/23] Disable usage stats on m1 command. (#4960) Co-authored-by: Eric Zhu --- python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py index d2698d23f598..e5a07b164939 100644 --- a/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py +++ b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py @@ -39,7 +39,7 @@ def main() -> None: async def run_task(task: str, hil_mode: bool) -> None: client = OpenAIChatCompletionClient(model="gpt-4o") m1 = MagenticOne(client=client, hil_mode=hil_mode) - await Console(m1.run_stream(task=task)) + await Console(m1.run_stream(task=task), output_stats=False) task = args.task[0] asyncio.run(run_task(task, not args.no_hil)) From c293b931f57a59231a789890883dffc7f7e67cfe Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 13:37:13 -0500 Subject: [PATCH 09/23] Make API reference TOC visible (#4962) Co-authored-by: Eric Zhu --- .../autogen-core/docs/src/reference/index.md | 30 ++----------------- 1 file changed, 3 insertions(+), 27 deletions(-) diff --git a/python/packages/autogen-core/docs/src/reference/index.md b/python/packages/autogen-core/docs/src/reference/index.md index 3f1374643931..cbd580884c8a 100644 --- a/python/packages/autogen-core/docs/src/reference/index.md +++ b/python/packages/autogen-core/docs/src/reference/index.md @@ -8,8 +8,8 @@ myst: # API Reference ```{toctree} -:hidden: :caption: AutoGen AgentChat +:maxdepth: 2 python/autogen_agentchat python/autogen_agentchat.messages @@ -22,8 +22,8 @@ python/autogen_agentchat.state ``` ```{toctree} -:hidden: :caption: AutoGen Core +:maxdepth: 2 python/autogen_core python/autogen_core.code_executor @@ -36,8 +36,8 @@ python/autogen_core.logging ``` ```{toctree} -:hidden: :caption: AutoGen Extensions +:maxdepth: 2 python/autogen_ext.agents.magentic_one python/autogen_ext.agents.openai @@ -56,27 +56,3 @@ python/autogen_ext.code_executors.docker python/autogen_ext.code_executors.azure python/autogen_ext.runtimes.grpc ``` - - From ecdf18d3f6ae406bb96859358e94edf9eee4ea06 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 13:44:13 -0500 Subject: [PATCH 10/23] Make package readmes slightly less empty (#4961) * Make package readmes slightly less empty * Update python/packages/autogen-ext/README.md --------- Co-authored-by: Eric Zhu --- python/packages/autogen-agentchat/README.md | 9 +++++++++ python/packages/autogen-core/README.md | 2 ++ python/packages/autogen-ext/README.md | 6 ++++-- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/python/packages/autogen-agentchat/README.md 
b/python/packages/autogen-agentchat/README.md index 07fa5d8c5208..4ada6f98280f 100644 --- a/python/packages/autogen-agentchat/README.md +++ b/python/packages/autogen-agentchat/README.md @@ -1,3 +1,12 @@ # AutoGen AgentChat - [Documentation](https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/index.html) + +AgentChat is a high-level API for building multi-agent applications. +It is built on top of the [`autogen-core`](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/index.html) package. +For beginner users, AgentChat is the recommended starting point. +For advanced users, [`autogen-core`](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/index.html)'s event-driven +programming model provides more flexibility and control over the underlying components. + +AgentChat provides intuitive defaults, such as **Agents** with preset +behaviors and **Teams** with predefined [multi-agent design patterns](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/design-patterns/intro.html). diff --git a/python/packages/autogen-core/README.md b/python/packages/autogen-core/README.md index b09e22b90a4e..8cebb616922c 100644 --- a/python/packages/autogen-core/README.md +++ b/python/packages/autogen-core/README.md @@ -1,3 +1,5 @@ # AutoGen Core - [Documentation](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/index.html) + +AutoGen core offers an easy way to quickly build event-driven, distributed, scalable, resilient AI agent systems. Agents are developed by using the [Actor model](https://en.wikipedia.org/wiki/Actor_model). You can build and run your agent system locally and easily move to a distributed system in the cloud when you are ready. diff --git a/python/packages/autogen-ext/README.md b/python/packages/autogen-ext/README.md index 402c411e224f..99f3138dfff9 100644 --- a/python/packages/autogen-ext/README.md +++ b/python/packages/autogen-ext/README.md @@ -1,3 +1,5 @@ -# autogen-ext +# AutoGen Extensions -[Documentation](https://microsoft.github.io/autogen/stable/user-guide/extensions-user-guide/index.html) +- [Documentation](https://microsoft.github.io/autogen/stable/user-guide/extensions-user-guide/index.html) + +AutoGen is designed to be extensible. The `autogen-ext` package contains many different component implementations maintained by the AutoGen project. However, we strongly encourage others to build their own components and publish them as part of the ecosytem. From f3ed7ae14781f834000fa5a4ac16df59b76827aa Mon Sep 17 00:00:00 2001 From: afourney Date: Thu, 9 Jan 2025 10:57:07 -0800 Subject: [PATCH 11/23] Fixed a failure in the MagenticOne test CI (#4966) Fixed CI --- .../tests/browser_utils/test_requests_markdown_browser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-magentic-one/tests/browser_utils/test_requests_markdown_browser.py b/python/packages/autogen-magentic-one/tests/browser_utils/test_requests_markdown_browser.py index b9919abf7a8e..72310c79ba2c 100644 --- a/python/packages/autogen-magentic-one/tests/browser_utils/test_requests_markdown_browser.py +++ b/python/packages/autogen-magentic-one/tests/browser_utils/test_requests_markdown_browser.py @@ -11,7 +11,7 @@ from autogen_magentic_one.markdown_browser import BingMarkdownSearch, RequestsMarkdownBrowser BLOG_POST_URL = "https://microsoft.github.io/autogen/0.2/blog/2023/04/21/LLM-tuning-math" -BLOG_POST_TITLE = "Does Model and Inference Parameter Matter in LLM Applications? 
- A Case Study for MATH | AutoGen" +BLOG_POST_TITLE = "Does Model and Inference Parameter Matter in LLM Applications? - A Case Study for MATH | AutoGen 0.2" BLOG_POST_STRING = "Large language models (LLMs) are powerful tools that can generate natural language texts for various applications, such as chatbots, summarization, translation, and more. GPT-4 is currently the state of the art LLM in the world. Is model selection irrelevant? What about inference parameters?" BLOG_POST_FIND_ON_PAGE_QUERY = "an example where high * complex" BLOG_POST_FIND_ON_PAGE_MATCH = "an example where high cost can easily prevent a generic complex" From 0446ce924fa80e07a99b8dceac8526d695e48442 Mon Sep 17 00:00:00 2001 From: Griffin Bassman Date: Thu, 9 Jan 2025 14:05:20 -0500 Subject: [PATCH 12/23] feat: Add o1-2024-12-17 model (#4965) Co-authored-by: Jack Gerrits --- .../src/autogen_ext/models/openai/_model_info.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py index d67beb44e1d8..d1c2bdf4241a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py @@ -5,6 +5,7 @@ # Based on: https://platform.openai.com/docs/models/continuous-model-upgrades # This is a moving target, so correctness is checked by the model value returned by openai against expected values at runtime`` _MODEL_POINTERS = { + "o1": "o1-2024-12-17", "o1-preview": "o1-preview-2024-09-12", "o1-mini": "o1-mini-2024-09-12", "gpt-4o": "gpt-4o-2024-08-06", @@ -18,6 +19,12 @@ } _MODEL_INFO: Dict[str, ModelInfo] = { + "o1-2024-12-17": { + "vision": False, + "function_calling": False, + "json_output": False, + "family": ModelFamily.O1, + }, "o1-preview-2024-09-12": { "vision": False, "function_calling": False, @@ -117,6 +124,7 @@ } _MODEL_TOKEN_LIMITS: Dict[str, int] = { + "o1-2024-12-17": 200000, "o1-preview-2024-09-12": 128000, "o1-mini-2024-09-12": 128000, "gpt-4o-2024-08-06": 128000, From 5b841e26d6284e7a3bd2a1c2ef2ce712a177c43e Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Thu, 9 Jan 2025 11:19:25 -0800 Subject: [PATCH 13/23] update landing page example (#4968) --- python/packages/autogen-core/docs/src/index.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 763da49b622a..4c3b3b222ae6 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -112,18 +112,15 @@ A programming framework for building conversational single and multi-agent appli Built on Core. 
```python -# pip install "autogen-agentchat==0.4.0.dev13" "autogen-ext[openai]==0.4.0.dev13" "yfinance" "matplotlib" +# pip install -U "autogen-agentchat" "autogen-ext[openai]" import asyncio from autogen_agentchat.agents import AssistantAgent -from autogen_agentchat.ui import Console from autogen_ext.models.openai import OpenAIChatCompletionClient -from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor -from autogen_ext.tools.code_execution import PythonCodeExecutionTool async def main() -> None: - tool = PythonCodeExecutionTool(LocalCommandLineCodeExecutor(work_dir="coding")) - agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o"), tools=[tool], reflect_on_tool_use=True) - await Console(agent.run_stream(task="Create a plot of MSFT stock prices in 2024 and save it to a file. Use yfinance and matplotlib.")) + agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o")) + print(agent.run(task="Say 'Hello World!'")) + asyncio.run(main()) ``` From 0122d44aa33977b2c9c1d47e5de9fc23fe41395a Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:06:01 -0500 Subject: [PATCH 14/23] OpenAI assistant fixes (#4969) --- python/packages/autogen-core/pyproject.toml | 9 ++- .../agents/openai/_openai_assistant_agent.py | 70 ++++++++----------- 2 files changed, 34 insertions(+), 45 deletions(-) diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index 9c15908f3b50..7f06fc66d02a 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -78,17 +78,16 @@ dev = [ [tool.ruff] extend = "../../pyproject.toml" -exclude = ["build", "dist", "src/autogen_core/application/protos", "tests/protos", "samples/protos"] -include = ["src/**", "samples/*.py", "docs/**/*.ipynb", "tests/**"] +exclude = ["build", "dist", "src/autogen_core/application/protos", "tests/protos"] +include = ["src/**", "docs/**/*.ipynb", "tests/**"] [tool.ruff.lint.per-file-ignores] -"samples/**.py" = ["T20"] "docs/**.ipynb" = ["T20"] [tool.pyright] extends = "../../pyproject.toml" -include = ["src", "tests", "samples"] -exclude = ["src/autogen_core/application/protos", "tests/protos", "samples/protos"] +include = ["src", "tests"] +exclude = ["src/autogen_core/application/protos", "tests/protos"] reportDeprecated = true [tool.pytest.ini_options] diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index 72c7ef568775..496b68fd3e0b 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -3,7 +3,6 @@ import logging import os from typing import ( - TYPE_CHECKING, Any, AsyncGenerator, Awaitable, @@ -19,6 +18,7 @@ cast, ) +import aiofiles from autogen_agentchat import EVENT_LOGGER_NAME from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response @@ -33,50 +33,31 @@ ToolCallRequestEvent, ) from autogen_core import CancellationToken, FunctionCall +from autogen_core.models._model_client import ChatCompletionClient from autogen_core.models._types import FunctionExecutionResult from autogen_core.tools import FunctionTool, Tool -_has_openai_dependencies: bool = True -try: - import aiofiles - - from openai import NOT_GIVEN - from openai.resources.beta.threads import AsyncMessages, AsyncRuns, 
AsyncThreads - from openai.types.beta.code_interpreter_tool_param import CodeInterpreterToolParam - from openai.types.beta.file_search_tool_param import FileSearchToolParam - from openai.types.beta.function_tool_param import FunctionToolParam - from openai.types.shared_params.function_definition import FunctionDefinition -except ImportError: - _has_openai_dependencies = False - -if TYPE_CHECKING: - import aiofiles - - from openai import NOT_GIVEN, AsyncClient, NotGiven - from openai.pagination import AsyncCursorPage - from openai.resources.beta.threads import AsyncMessages, AsyncRuns, AsyncThreads - from openai.types import FileObject - from openai.types.beta import thread_update_params - from openai.types.beta.assistant import Assistant - from openai.types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam - from openai.types.beta.assistant_tool_param import AssistantToolParam - from openai.types.beta.code_interpreter_tool_param import CodeInterpreterToolParam - from openai.types.beta.file_search_tool_param import FileSearchToolParam - from openai.types.beta.function_tool_param import FunctionToolParam - from openai.types.beta.thread import Thread, ToolResources, ToolResourcesCodeInterpreter - from openai.types.beta.threads import Message, MessageDeleted, Run - from openai.types.beta.vector_store import VectorStore - from openai.types.shared_params.function_definition import FunctionDefinition +from openai import NOT_GIVEN, AsyncClient, NotGiven +from openai.pagination import AsyncCursorPage +from openai.resources.beta.threads import AsyncMessages, AsyncRuns, AsyncThreads +from openai.types import FileObject +from openai.types.beta import thread_update_params +from openai.types.beta.assistant import Assistant +from openai.types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam +from openai.types.beta.assistant_tool_param import AssistantToolParam +from openai.types.beta.code_interpreter_tool_param import CodeInterpreterToolParam +from openai.types.beta.file_search_tool_param import FileSearchToolParam +from openai.types.beta.function_tool_param import FunctionToolParam +from openai.types.beta.thread import Thread, ToolResources, ToolResourcesCodeInterpreter +from openai.types.beta.threads import Message, MessageDeleted, Run +from openai.types.beta.vector_store import VectorStore +from openai.types.shared_params.function_definition import FunctionDefinition event_logger = logging.getLogger(EVENT_LOGGER_NAME) def _convert_tool_to_function_param(tool: Tool) -> "FunctionToolParam": """Convert an autogen Tool to an OpenAI Assistant function tool parameter.""" - if not _has_openai_dependencies: - raise RuntimeError( - "Missing dependecies for OpenAIAssistantAgent. Please ensure the autogen-ext package was installed with the 'openai' extra." 
- ) schema = tool.schema parameters: Dict[str, object] = {} @@ -158,10 +139,12 @@ async def example(): await assistant.on_upload_for_code_interpreter("data.csv", cancellation_token) # Get response from the assistant - _response = await assistant.on_messages( + response = await assistant.on_messages( [TextMessage(source="user", content="Analyze the data in data.csv")], cancellation_token ) + print(response) + # Clean up resources await assistant.delete_uploaded_files(cancellation_token) await assistant.delete_assistant(cancellation_token) @@ -207,9 +190,9 @@ def __init__( tool_resources: Optional["ToolResources"] = None, top_p: Optional[float] = None, ) -> None: - if not _has_openai_dependencies: - raise RuntimeError( - "Missing dependecies for OpenAIAssistantAgent. Please ensure the autogen-ext package was installed with the 'openai' extra." + if isinstance(client, ChatCompletionClient): + raise ValueError( + "Incorrect client passed to OpenAIAssistantAgent. Please use an OpenAI AsyncClient instance instead of an AutoGen ChatCompletionClient instance." ) super().__init__(name, description) @@ -510,6 +493,8 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: async def _upload_files(self, file_paths: str | Iterable[str], cancellation_token: CancellationToken) -> List[str]: """Upload files and return their IDs.""" + await self._ensure_initialized() + if isinstance(file_paths, str): file_paths = [file_paths] @@ -531,6 +516,8 @@ async def on_upload_for_code_interpreter( self, file_paths: str | Iterable[str], cancellation_token: CancellationToken ) -> None: """Handle file uploads for the code interpreter.""" + await self._ensure_initialized() + file_ids = await self._upload_files(file_paths, cancellation_token) # Update thread with the new files @@ -596,6 +583,7 @@ async def on_upload_for_file_search( async def delete_uploaded_files(self, cancellation_token: CancellationToken) -> None: """Delete all files that were uploaded by this agent instance.""" + await self._ensure_initialized() for file_id in self._uploaded_file_ids: try: await cancellation_token.link_future(asyncio.ensure_future(self._client.files.delete(file_id=file_id))) @@ -605,6 +593,7 @@ async def delete_uploaded_files(self, cancellation_token: CancellationToken) -> async def delete_assistant(self, cancellation_token: CancellationToken) -> None: """Delete the assistant if it was created by this instance.""" + await self._ensure_initialized() if self._assistant is not None and not self._assistant_id: try: await cancellation_token.link_future( @@ -616,6 +605,7 @@ async def delete_assistant(self, cancellation_token: CancellationToken) -> None: async def delete_vector_store(self, cancellation_token: CancellationToken) -> None: """Delete the vector store if it was created by this instance.""" + await self._ensure_initialized() if self._vector_store_id is not None: try: await cancellation_token.link_future( From 99e2e39281084ec96eccfe4ddd6b39bbfbc81e7d Mon Sep 17 00:00:00 2001 From: SeryioGonzalez Date: Thu, 9 Jan 2025 21:28:50 +0100 Subject: [PATCH 15/23] Update swarm.ipynb (#4958) Small typo Co-authored-by: Jack Gerrits --- .../docs/src/user-guide/agentchat-user-guide/swarm.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb index 99bb02710ffe..dfc08fa9e50c 100644 --- 
a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb @@ -28,7 +28,7 @@ "where agents take turn to generate a response. \n", "Similar to {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", "and {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, participant agents\n", - "broadcast their responses so all agents share the same mesasge context.\n", + "broadcast their responses so all agents share the same message context.\n", "\n", "Different from the other two group chat teams, at each turn,\n", "**the speaker agent is selected based on the most recent\n", From 7c31ee057394629a8b71d8913d5bd681d8e678fd Mon Sep 17 00:00:00 2001 From: SeryioGonzalez Date: Thu, 9 Jan 2025 21:29:13 +0100 Subject: [PATCH 16/23] Update swarm.ipynb (#4959) Small typo in docs Co-authored-by: Jack Gerrits --- .../docs/src/user-guide/agentchat-user-guide/swarm.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb index dfc08fa9e50c..1cd48486abee 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb @@ -83,7 +83,7 @@ " - For information needed from the customer, either agent can hand off to the `\"user\"`.\n", "3. The **Flights Refunder** processes refunds using the `refund_flight` tool when appropriate.\n", "4. If an agent hands off to the `\"user\"`, the team execution will stop and wait for the user to input a response.\n", - "5. When the user provides input, it's sent back to the team as a {py:class}`~autogen_agentchat.messages.HandaffMessage`. This message is directed to the agent that originally requested user input.\n", + "5. When the user provides input, it's sent back to the team as a {py:class}`~autogen_agentchat.messages.HandoffMessage`. This message is directed to the agent that originally requested user input.\n", "6. The process continues until the Travel Agent determines the task is complete and terminates the workflow." 
] }, From c4302eecab6c34fb14e301fa2579d21aeedabf34 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:29:36 -0500 Subject: [PATCH 17/23] Fixes for azure-container-code-executor.ipynb (#4970) Fixes for azure-container-code-executor.ipyn --- .../azure-container-code-executor.ipynb | 38 +++++++++++++------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb index 7bc7ef4da275..c71ee58e9118 100644 --- a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb @@ -18,9 +18,9 @@ "\n", "Alternatively, you can use the [Azure CLI to create a session pool.](https://learn.microsoft.com/en-us/azure/container-apps/sessions-code-interpreter#create-a-session-pool-with-azure-cli)\n", "\n", - "## AzureContainerCodeExecutor\n", + "## ACADynamicSessionsCodeExecutor\n", "\n", - "The {py:class}`~autogen_ext.code_executor.aca_dynamic_sessions.AzureContainerCodeExecutor` class is a python code executor that creates and executes arbitrary python code on a default Serverless code interpreter session. Its interface is as follows\n", + "The {py:class}`~autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor` class is a python code executor that creates and executes arbitrary python code on a default Serverless code interpreter session. Its interface is as follows\n", "\n", "### Initialization\n", "\n", @@ -53,7 +53,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -62,8 +62,8 @@ "\n", "from anyio import open_file\n", "from autogen_core import CancellationToken\n", - "from autogen_core.components.code_executor import CodeBlock\n", - "from autogen_ext.code_executor.aca_dynamic_sessions import AzureContainerCodeExecutor\n", + "from autogen_core.code_executor import CodeBlock\n", + "from autogen_ext.code_executors.azure import ACADynamicSessionsCodeExecutor\n", "from azure.identity import DefaultAzureCredential" ] }, @@ -84,7 +84,7 @@ "POOL_MANAGEMENT_ENDPOINT = \"...\"\n", "\n", "with tempfile.TemporaryDirectory() as temp_dir:\n", - " executor = AzureContainerCodeExecutor(\n", + " executor = ACADynamicSessionsCodeExecutor(\n", " pool_management_endpoint=POOL_MANAGEMENT_ENDPOINT, credential=DefaultAzureCredential(), work_dir=temp_dir\n", " )\n", "\n", @@ -120,7 +120,7 @@ " assert os.path.isfile(os.path.join(temp_dir, test_file_1))\n", " assert os.path.isfile(os.path.join(temp_dir, test_file_2))\n", "\n", - " executor = AzureContainerCodeExecutor(\n", + " executor = ACADynamicSessionsCodeExecutor(\n", " pool_management_endpoint=POOL_MANAGEMENT_ENDPOINT, credential=DefaultAzureCredential(), work_dir=temp_dir\n", " )\n", " await executor.upload_files([test_file_1, test_file_2], cancellation_token)\n", @@ -168,7 +168,7 @@ " assert not os.path.isfile(os.path.join(temp_dir, test_file_1))\n", " assert not os.path.isfile(os.path.join(temp_dir, test_file_2))\n", "\n", - " executor = AzureContainerCodeExecutor(\n", + " executor = ACADynamicSessionsCodeExecutor(\n", " pool_management_endpoint=POOL_MANAGEMENT_ENDPOINT, credential=DefaultAzureCredential(), work_dir=temp_dir\n", " )\n", "\n", @@ -208,7 +208,7 @@ "source": [ "### New Sessions\n", 
"\n", - "Every instance of the {py:class}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor` class will have a unique session ID. Every call to a particular code executor will be executed on the same session until the {py:meth}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor.restart` function is called on it. Previous sessions cannot be reused.\n", + "Every instance of the {py:class}`~autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor` class will have a unique session ID. Every call to a particular code executor will be executed on the same session until the {py:meth}`~autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor.restart` function is called on it. Previous sessions cannot be reused.\n", "\n", "Here we'll run some code on the code session, restart it, then verify that a new session has been opened." ] @@ -219,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "executor = AzureContainerCodeExecutor(\n", + "executor = ACADynamicSessionsCodeExecutor(\n", " pool_management_endpoint=POOL_MANAGEMENT_ENDPOINT, credential=DefaultAzureCredential()\n", ")\n", "\n", @@ -243,7 +243,7 @@ "source": [ "### Available Packages\n", "\n", - "Each code execution instance is pre-installed with most of the commonly used packages. However, the list of available packages and versions are not available outside of the execution environment. The packages list on the environment can be retrieved by calling the {py:meth}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor.get_available_packages` function on the code executor." + "Each code execution instance is pre-installed with most of the commonly used packages. However, the list of available packages and versions are not available outside of the execution environment. The packages list on the environment can be retrieved by calling the {py:meth}`~autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor.get_available_packages` function on the code executor." 
] }, { @@ -257,8 +257,22 @@ } ], "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" } }, "nbformat": 4, From c2721ff65b745da03a0aa92f7e93eea028de17d7 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:29:54 -0500 Subject: [PATCH 18/23] Update all versions to 0.4.0 (#4941) * Update all versions to 0.4.0 * update redirect * install with upgrade for agentchat --- README.md | 4 ++-- python/README.md | 2 +- python/packages/autogen-agentchat/pyproject.toml | 4 ++-- .../src/_templates/sidebar-nav-bs-agentchat.html | 2 +- .../docs/src/_templates/sidebar-nav-bs-core.html | 2 +- .../src/_templates/sidebar-nav-bs-extensions.html | 2 +- python/packages/autogen-core/docs/src/index.md | 6 +++--- .../user-guide/agentchat-user-guide/installation.md | 4 ++-- .../user-guide/agentchat-user-guide/magentic-one.md | 2 +- .../user-guide/agentchat-user-guide/quickstart.ipynb | 2 +- .../agentchat-user-guide/tutorial/models.ipynb | 4 ++-- .../framework/distributed-agent-runtime.ipynb | 4 ++-- .../src/user-guide/core-user-guide/installation.md | 2 +- .../user-guide/extensions-user-guide/installation.md | 2 +- python/packages/autogen-core/pyproject.toml | 4 ++-- python/packages/autogen-ext/pyproject.toml | 12 ++++++------ .../autogen_ext/agents/file_surfer/_file_surfer.py | 2 +- .../agents/openai/_openai_assistant_agent.py | 2 +- .../autogen_ext/agents/video_surfer/_video_surfer.py | 2 +- .../agents/web_surfer/_multimodal_web_surfer.py | 2 +- .../azure/_azure_container_code_executor.py | 2 +- .../code_executors/docker/_docker_code_executor.py | 2 +- .../src/autogen_ext/models/openai/__init__.py | 4 ++-- .../src/autogen_ext/models/openai/_openai_client.py | 4 ++-- .../src/autogen_ext/teams/magentic_one.py | 2 +- .../tools/code_execution/_code_execution.py | 2 +- .../packages/autogen-studio/autogenstudio/version.py | 2 +- python/packages/autogen-studio/pyproject.toml | 6 +++--- python/samples/agentchat_chainlit/requirements.txt | 2 +- .../samples/core_async_human_in_the_loop/README.md | 2 +- python/samples/core_chess_game/README.md | 2 +- python/uv.lock | 8 ++++---- 32 files changed, 52 insertions(+), 52 deletions(-) diff --git a/README.md b/README.md index f2911f4950ec..807f13077b29 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ ```bash # Install AgentChat and OpenAI client from Extensions -pip install "autogen-agentchat" "autogen-ext[openai]" +pip install -U "autogen-agentchat" "autogen-ext[openai]" ``` The current stable version is v0.4. If you are upgrading from AutoGen v0.2, please refer to the [Migration Guide](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/migration-guide.html) for detailed instructions on how to update your code and configurations. @@ -44,7 +44,7 @@ Create a group chat team with an assistant agent, a web surfer agent, and a user for web browsing tasks. You need to install [playwright](https://playwright.dev/python/docs/library). 
```python -# pip install autogen-agentchat autogen-ext[openai,web-surfer] +# pip install -U autogen-agentchat autogen-ext[openai,web-surfer] # playwright install import asyncio from autogen_agentchat.agents import AssistantAgent, UserProxyAgent diff --git a/python/README.md b/python/README.md index 53859a0132b7..4c8b53fdadbc 100644 --- a/python/README.md +++ b/python/README.md @@ -1,7 +1,7 @@ # AutoGen Python packages [![0.4 Docs](https://img.shields.io/badge/Docs-0.4-blue)](https://microsoft.github.io/autogen/dev/) -[![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev13/) [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev13/) [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev13/) +[![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/) [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/) [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) This directory works as a single `uv` workspace containing all project packages. See [`packages`](./packages/) to discover all project packages. diff --git a/python/packages/autogen-agentchat/pyproject.toml b/python/packages/autogen-agentchat/pyproject.toml index b8b694d9651d..2aad2d35b1b7 100644 --- a/python/packages/autogen-agentchat/pyproject.toml +++ b/python/packages/autogen-agentchat/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-agentchat" -version = "0.4.0.dev13" +version = "0.4.0" license = {file = "LICENSE-CODE"} description = "AutoGen agents and teams library" readme = "README.md" @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-core==0.4.0.dev13", + "autogen-core==0.4.0", "aioconsole>=0.8.1" ] diff --git a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-agentchat.html b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-agentchat.html index 3351908bfd67..afbaff852b8f 100644 --- a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-agentchat.html +++ b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-agentchat.html @@ -20,7 +20,7 @@
  • + href="https://pypi.org/project/autogen-agentchat/"> PyPi diff --git a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-core.html b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-core.html index d5b18efd3312..d6288526d322 100644 --- a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-core.html +++ b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-core.html @@ -19,7 +19,7 @@
  • - + PyPi diff --git a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-extensions.html b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-extensions.html index 9ae28dd1a109..b2c27fdfc811 100644 --- a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-extensions.html +++ b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-extensions.html @@ -20,7 +20,7 @@
  • - + PyPi diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 4c3b3b222ae6..e62b398dce58 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -105,7 +105,7 @@ Get Started
    {fas}`people-group;pst-color-primary` AgentChat -[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev13/) +[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/)
    A programming framework for building conversational single and multi-agent applications. @@ -136,7 +136,7 @@ Get Started ::: -:::{grid-item-card} {fas}`cube;pst-color-primary` Core [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev13/) +:::{grid-item-card} {fas}`cube;pst-color-primary` Core [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/) :shadow: none :margin: 2 0 0 0 :columns: 12 12 12 12 @@ -159,7 +159,7 @@ Get Started ::: -:::{grid-item-card} {fas}`puzzle-piece;pst-color-primary` Extensions [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev13/) +:::{grid-item-card} {fas}`puzzle-piece;pst-color-primary` Extensions [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) :shadow: none :margin: 2 0 0 0 :columns: 12 12 12 12 diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md index b55fcccd54ca..e4e49591df95 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md @@ -61,7 +61,7 @@ Install the `autogen-agentchat` package using pip: ```bash -pip install "autogen-agentchat==0.4.0.dev13" +pip install -U "autogen-agentchat" ``` ```{note} @@ -74,7 +74,7 @@ To use the OpenAI and Azure OpenAI models, you need to install the following extensions: ```bash -pip install "autogen-ext[openai]==0.4.0.dev13" +pip install "autogen-ext[openai]" ``` If you are using Azure OpenAI with AAD authentication, you need to install the following: diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/magentic-one.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/magentic-one.md index 48c7afc0b646..556952605618 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/magentic-one.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/magentic-one.md @@ -41,7 +41,7 @@ Be aware that agents may occasionally attempt risky actions, such as recruiting Install the required packages: ```bash -pip install autogen-agentchat==0.4.0.dev13 autogen-ext[magentic-one,openai]==0.4.0.dev13 +pip install autogen-agentchat autogen-ext[magentic-one,openai] # If using the MultimodalWebSurfer, you also need to install playwright dependencies: playwright install --with-deps chromium diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb index 94dbb7528d64..06e265b1429d 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb @@ -29,7 +29,7 @@ }, "outputs": [], "source": [ - "pip install \"autogen-agentchat==0.4.0.dev13\" \"autogen-ext[openai,azure]==0.4.0.dev13\"" + "pip install -U \"autogen-agentchat\" \"autogen-ext[openai,azure]\"" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb 
b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb index ce1c21077d43..d7aed4fc5cda 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb @@ -28,7 +28,7 @@ }, "outputs": [], "source": [ - "pip install \"autogen-ext[openai]==0.4.0.dev13\"" + "pip install \"autogen-ext[openai]\"" ] }, { @@ -108,7 +108,7 @@ }, "outputs": [], "source": [ - "pip install \"autogen-ext[openai,azure]==0.4.0.dev13\"" + "pip install \"autogen-ext[openai,azure]\"" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index 96a80a2f08cb..c67c998c0a65 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -24,7 +24,7 @@ "````{note}\n", "The distributed agent runtime requires extra dependencies, install them using:\n", "```bash\n", - "pip install \"autogen-ext[grpc]==0.4.0.dev13\"\n", + "pip install \"autogen-ext[grpc]\"\n", "```\n", "````\n", "\n", @@ -222,4 +222,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md index 8b7d0dc2dfeb..3fd181c7feeb 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md @@ -54,7 +54,7 @@ Install the `autogen-core` package using pip: ```bash -pip install "autogen-core==0.4.0.dev13" +pip install "autogen-core" ``` ```{note} diff --git a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/installation.md index bbec3120d6dd..7a59605b19ec 100644 --- a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/installation.md @@ -10,7 +10,7 @@ myst: First-part maintained extensions are available in the `autogen-ext` package. 
```sh -pip install "autogen-ext==0.4.0.dev13" +pip install "autogen-ext" ``` Extras: diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index 7f06fc66d02a..4d6aa4ba6410 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-core" -version = "0.4.0.dev13" +version = "0.4.0" license = {file = "LICENSE-CODE"} description = "Foundational interfaces and agent runtime implementation for AutoGen" readme = "README.md" @@ -69,7 +69,7 @@ dev = [ "pygments", "sphinxext-rediraffe", - "autogen_ext==0.4.0.dev13", + "autogen_ext==0.4.0", # Documentation tooling "sphinx-autobuild", diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index 8b972e98b8b0..a2ace335f172 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ b/python/packages/autogen-ext/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-ext" -version = "0.4.0.dev13" +version = "0.4.0" license = {file = "LICENSE-CODE"} description = "AutoGen extensions library" readme = "README.md" @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-core==0.4.0.dev13", + "autogen-core==0.4.0", ] [project.optional-dependencies] @@ -24,23 +24,23 @@ azure = ["azure-core", "azure-identity"] docker = ["docker~=7.0"] openai = ["openai>=1.52.2", "tiktoken>=0.8.0", "aiofiles"] file-surfer = [ - "autogen-agentchat==0.4.0.dev13", + "autogen-agentchat==0.4.0", "markitdown>=0.0.1a2", ] web-surfer = [ - "autogen-agentchat==0.4.0.dev13", + "autogen-agentchat==0.4.0", "playwright>=1.48.0", "pillow>=11.0.0", "markitdown>=0.0.1a2", ] magentic-one = [ - "autogen-agentchat==0.4.0.dev13", + "autogen-agentchat==0.4.0", "markitdown>=0.0.1a2", "playwright>=1.48.0", "pillow>=11.0.0", ] video-surfer = [ - "autogen-agentchat==0.4.0.dev13", + "autogen-agentchat==0.4.0", "opencv-python>=4.5", "ffmpeg-python", "openai-whisper", diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py index 7297a88e6979..0f389313057a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py @@ -37,7 +37,7 @@ class FileSurfer(BaseChatAgent): .. code-block:: bash - pip install "autogen-ext[file-surfer]==0.4.0.dev13" + pip install "autogen-ext[file-surfer]" Args: name (str): The agent's name diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index 496b68fd3e0b..e4d359cf3eff 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -84,7 +84,7 @@ class OpenAIAssistantAgent(BaseChatAgent): .. 
code-block:: bash - pip install "autogen-ext[openai]==0.4.0.dev13" + pip install "autogen-ext[openai]" This agent leverages the OpenAI Assistant API to create AI assistants with capabilities like: diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py index 43be974b1ea4..34887779310c 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py @@ -22,7 +22,7 @@ class VideoSurfer(AssistantAgent): .. code-block:: bash - pip install "autogen-ext[video-surfer]==0.4.0.dev13" + pip install "autogen-ext[video-surfer]" This agent utilizes various tools to extract information from the video, such as its length, screenshots at specific timestamps, and audio transcriptions. It processes these elements to provide detailed answers to user queries. diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index c6bcf2c85d7b..d266a2086529 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -66,7 +66,7 @@ class MultimodalWebSurfer(BaseChatAgent): .. code-block:: bash - pip install "autogen-ext[web-surfer]==0.4.0.dev13" + pip install "autogen-ext[web-surfer]" It launches a chromium browser and allows the playwright to interact with the web browser and can perform a variety of actions. The browser is launched on the first call to the agent and is reused for subsequent calls. diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executors/azure/_azure_container_code_executor.py b/python/packages/autogen-ext/src/autogen_ext/code_executors/azure/_azure_container_code_executor.py index c877d91f69e9..3971d93665c1 100644 --- a/python/packages/autogen-ext/src/autogen_ext/code_executors/azure/_azure_container_code_executor.py +++ b/python/packages/autogen-ext/src/autogen_ext/code_executors/azure/_azure_container_code_executor.py @@ -51,7 +51,7 @@ class ACADynamicSessionsCodeExecutor(CodeExecutor): .. code-block:: bash - pip install "autogen-ext[azure]==0.4.0.dev13" + pip install "autogen-ext[azure]" .. caution:: diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py b/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py index d608d389d1d9..05924e186643 100644 --- a/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py +++ b/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py @@ -59,7 +59,7 @@ class DockerCommandLineCodeExecutor(CodeExecutor): .. 
code-block:: bash - pip install "autogen-ext[docker]==0.4.0.dev13" + pip install "autogen-ext[docker]" The executor first saves each code block in a file in the working diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py index dbe2eb65e045..366ad831175e 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py @@ -1,9 +1,9 @@ -from ._openai_client import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient, BaseOpenAIChatCompletionClient +from ._openai_client import AzureOpenAIChatCompletionClient, BaseOpenAIChatCompletionClient, OpenAIChatCompletionClient from .config import ( AzureOpenAIClientConfigurationConfigModel, - OpenAIClientConfigurationConfigModel, BaseOpenAIClientConfigurationConfigModel, CreateArgumentsConfigModel, + OpenAIClientConfigurationConfigModel, ) __all__ = [ diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index 0a811dacce83..5b9f51129a88 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -931,7 +931,7 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient, Component[OpenA .. code-block:: bash - pip install "autogen-ext[openai]==0.4.0.dev13" + pip install "autogen-ext[openai]" The following code snippet shows how to use the client with an OpenAI model: @@ -1062,7 +1062,7 @@ class AzureOpenAIChatCompletionClient( .. code-block:: bash - pip install "autogen-ext[openai,azure]==0.4.0.dev13" + pip install "autogen-ext[openai,azure]" To use the client, you need to provide your deployment id, Azure Cognitive Services endpoint, api version, and model capabilities. diff --git a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py b/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py index fb59332be213..23aca97014c3 100644 --- a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py +++ b/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py @@ -23,7 +23,7 @@ class MagenticOne(MagenticOneGroupChat): .. code-block:: bash - pip install "autogen-ext[magentic-one]==0.4.0.dev13" + pip install "autogen-ext[magentic-one]" Args: diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py b/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py index a0669e5c71fe..3b72940f2445 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py @@ -30,7 +30,7 @@ class PythonCodeExecutionTool(BaseTool[CodeExecutionInput, CodeExecutionResult]) .. code-block:: bash - pip install "autogen-agentchat==0.4.0.dev13" "autogen-ext[openai]==0.4.0.dev13" "yfinance" "matplotlib" + pip install -U "autogen-agentchat" "autogen-ext[openai]" "yfinance" "matplotlib" .. 
code-block:: python diff --git a/python/packages/autogen-studio/autogenstudio/version.py b/python/packages/autogen-studio/autogenstudio/version.py index 525ab752dcd4..171811d227da 100644 --- a/python/packages/autogen-studio/autogenstudio/version.py +++ b/python/packages/autogen-studio/autogenstudio/version.py @@ -1,3 +1,3 @@ -VERSION = "0.4.0.dev41" +VERSION = "0.4.0" __version__ = VERSION APP_NAME = "autogenstudio" diff --git a/python/packages/autogen-studio/pyproject.toml b/python/packages/autogen-studio/pyproject.toml index 5ebbc086b7b0..5fa6676198a6 100644 --- a/python/packages/autogen-studio/pyproject.toml +++ b/python/packages/autogen-studio/pyproject.toml @@ -33,9 +33,9 @@ dependencies = [ "alembic", "loguru", "pyyaml", - "autogen-core==0.4.0.dev13", - "autogen-agentchat==0.4.0.dev13", - "autogen-ext[magentic-one]==0.4.0.dev13" + "autogen-core==0.4.0", + "autogen-agentchat==0.4.0", + "autogen-ext[magentic-one]==0.4.0" ] optional-dependencies = {web = ["fastapi", "uvicorn"], database = ["psycopg"]} diff --git a/python/samples/agentchat_chainlit/requirements.txt b/python/samples/agentchat_chainlit/requirements.txt index c7dd4ca40348..db122ba31ce4 100644 --- a/python/samples/agentchat_chainlit/requirements.txt +++ b/python/samples/agentchat_chainlit/requirements.txt @@ -1,2 +1,2 @@ chainlit -autogen-agentchat==0.4.0.dev13 +autogen-agentchat==0.4.0 diff --git a/python/samples/core_async_human_in_the_loop/README.md b/python/samples/core_async_human_in_the_loop/README.md index af8d9e4a1d79..7ed54cc92416 100644 --- a/python/samples/core_async_human_in_the_loop/README.md +++ b/python/samples/core_async_human_in_the_loop/README.md @@ -9,7 +9,7 @@ An example showing human-in-the-loop which waits for human input before making t First, you need a shell with AutoGen core and required dependencies installed. ```bash -pip install "autogen-core==0.4.0.dev13" "autogen-ext[openai,azure]==0.4.0.dev13" +pip install "autogen-core" "autogen-ext[openai,azure]" ``` ### Model Configuration diff --git a/python/samples/core_chess_game/README.md b/python/samples/core_chess_game/README.md index b0f5a9194a2d..a27c5cb99025 100644 --- a/python/samples/core_chess_game/README.md +++ b/python/samples/core_chess_game/README.md @@ -9,7 +9,7 @@ An example with two chess player agents that executes its own tools to demonstra First, you need a shell with AutoGen core and required dependencies installed. 
```bash -pip install "autogen-core==0.4.0.dev13" "autogen-ext[openai,azure]==0.4.0.dev13" "chess" +pip install "autogen-core" "autogen-ext[openai,azure]" "chess" ``` ### Model Configuration diff --git a/python/uv.lock b/python/uv.lock index 330f570ef432..435d9d210ebe 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -347,7 +347,7 @@ wheels = [ [[package]] name = "autogen-agentchat" -version = "0.4.0.dev13" +version = "0.4.0" source = { editable = "packages/autogen-agentchat" } dependencies = [ { name = "aioconsole" }, @@ -362,7 +362,7 @@ requires-dist = [ [[package]] name = "autogen-core" -version = "0.4.0.dev13" +version = "0.4.0" source = { editable = "packages/autogen-core" } dependencies = [ { name = "jsonref" }, @@ -477,7 +477,7 @@ dev = [ [[package]] name = "autogen-ext" -version = "0.4.0.dev13" +version = "0.4.0" source = { editable = "packages/autogen-ext" } dependencies = [ { name = "autogen-core" }, @@ -652,7 +652,7 @@ requires-dist = [ [[package]] name = "autogenstudio" -version = "0.4.0.dev41" +version = "0.4.0" source = { editable = "packages/autogen-studio" } dependencies = [ { name = "aiofiles" }, From 388a402243964de8eddd48a82c406cd9f19b6e25 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:31:46 -0500 Subject: [PATCH 19/23] Update magentic-one-cli dep bounds (#4971) --- python/packages/magentic-one-cli/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/magentic-one-cli/pyproject.toml b/python/packages/magentic-one-cli/pyproject.toml index b5db07ed79e5..036cb33b787d 100644 --- a/python/packages/magentic-one-cli/pyproject.toml +++ b/python/packages/magentic-one-cli/pyproject.toml @@ -15,8 +15,8 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-agentchat", #>=0.4.0<0.5 - "autogen-ext[openai,magentic-one]", #>=0.4.0<0.5 + "autogen-agentchat>=0.4.0<0.5", + "autogen-ext[openai,magentic-one]>=0.4.0<0.5", ] [project.scripts] From 78ac9f8507d9f7c8e3d87b4db67b57e116ade1b7 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:40:19 -0500 Subject: [PATCH 20/23] Fix magentic-one-cli version bound (#4972) --- python/packages/magentic-one-cli/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/magentic-one-cli/pyproject.toml b/python/packages/magentic-one-cli/pyproject.toml index 036cb33b787d..cbbc48867e8c 100644 --- a/python/packages/magentic-one-cli/pyproject.toml +++ b/python/packages/magentic-one-cli/pyproject.toml @@ -15,8 +15,8 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-agentchat>=0.4.0<0.5", - "autogen-ext[openai,magentic-one]>=0.4.0<0.5", + "autogen-agentchat>=0.4.0,<0.5", + "autogen-ext[openai,magentic-one]>=0.4.0,<0.5", ] [project.scripts] From 4dab09cabe78c5360cc31832055d72f78a67877b Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:54:36 -0500 Subject: [PATCH 21/23] Update magentic-one-cli version to 0.2.0 (#4973) * Update magentic-one-cli version to 0.2.0 * lock --- python/packages/magentic-one-cli/pyproject.toml | 2 +- python/uv.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/magentic-one-cli/pyproject.toml b/python/packages/magentic-one-cli/pyproject.toml index cbbc48867e8c..5b14ed6f73d7 100644 --- a/python/packages/magentic-one-cli/pyproject.toml +++ b/python/packages/magentic-one-cli/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "magentic-one-cli" -version = 
"0.1.0" +version = "0.2.0" license = {file = "LICENSE-CODE"} description = "Magentic-One is a generalist multi-agent system, built on `AutoGen-AgentChat`, for solving complex web and file-based tasks. This package installs the `m1` command-line utility to quickly get started with Magentic-One." readme = "README.md" diff --git a/python/uv.lock b/python/uv.lock index 435d9d210ebe..219b5176d2d1 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -2468,7 +2468,7 @@ wheels = [ [[package]] name = "magentic-one-cli" -version = "0.1.0" +version = "0.2.0" source = { editable = "packages/magentic-one-cli" } dependencies = [ { name = "autogen-agentchat" }, From 623e0185c7f33eb43e073d33bb74c21e59522dad Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:58:45 -0500 Subject: [PATCH 22/23] Update switcher versions and make 0.4.0 stable (#4940) * Update switcher versions and make 0.4.0 stable * update versions * update switcher --- .github/workflows/docs.yml | 3 ++- docs/switcher.json | 16 +++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0afb1745ba10..40b66a54471c 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -33,7 +33,7 @@ jobs: [ # For main use the workflow target { ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13" }, - { ref: "${{github.ref}}", dest-dir: stable, uv-version: "0.5.13" }, + { ref: "v0.4.0", dest-dir: stable, uv-version: "0.5.13" }, { ref: "v0.4.0.dev0", dest-dir: "0.4.0.dev0", uv-version: "0.5.11" }, { ref: "v0.4.0.dev1", dest-dir: "0.4.0.dev1", uv-version: "0.5.11" }, { ref: "v0.4.0.dev2", dest-dir: "0.4.0.dev2", uv-version: "0.5.11" }, @@ -48,6 +48,7 @@ jobs: { ref: "v0.4.0.dev11", dest-dir: "0.4.0.dev11", uv-version: "0.5.11" }, { ref: "v0.4.0.dev12", dest-dir: "0.4.0.dev12", uv-version: "0.5.13" }, { ref: "v0.4.0.dev13", dest-dir: "0.4.0.dev13", uv-version: "0.5.13" }, + { ref: "v0.4.0", dest-dir: "0.4.0", uv-version: "0.5.13" }, ] steps: - name: Checkout diff --git a/docs/switcher.json b/docs/switcher.json index 2e6bcd63f8b9..f90f52ef0847 100644 --- a/docs/switcher.json +++ b/docs/switcher.json @@ -1,13 +1,20 @@ [ { - "name": "0.2 (stable)", - "version": "0.2-stable", - "url": "/autogen/0.2/" + "name": "0.4.0 (stable)", + "version": "stable", + "url": "/autogen/stable/", + "preferred": true }, { + "name": "dev (main)", "version": "dev", "url": "/autogen/dev/" }, + { + "name": "0.2", + "version": "0.2", + "url": "/autogen/0.2/" + }, { "name": "0.4.0.dev0", "version": "0.4.0.dev0", @@ -76,7 +83,6 @@ { "name": "0.4.0.dev13", "version": "0.4.0.dev13", - "url": "/autogen/0.4.0.dev13/", - "preferred": true + "url": "/autogen/0.4.0.dev13/" } ] From 90112e1162e13dadfd6c0a8375cdb9d705d1c984 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 16:09:14 -0500 Subject: [PATCH 23/23] Update version of preferred docs URL --- docs/switcher.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/switcher.json b/docs/switcher.json index f90f52ef0847..3d94443e6a4e 100644 --- a/docs/switcher.json +++ b/docs/switcher.json @@ -1,7 +1,7 @@ [ { "name": "0.4.0 (stable)", - "version": "stable", + "version": "0.4.0", "url": "/autogen/stable/", "preferred": true },