From 2ff543e8768957a3030802c269f263e4bcc6aa5d Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Mon, 6 Jan 2025 10:06:54 -0800 Subject: [PATCH 01/61] Add missing API doc for Python code execution tool (#4901) * Add missing API doc for Python code execution tool * wip * Add API doc for the executor tool --------- Co-authored-by: Jack Gerrits --- .../autogen-core/docs/src/reference/index.md | 1 + .../autogen_ext.tools.code_execution.rst | 8 ++++ .../tools/code_execution/_code_execution.py | 43 +++++++++++++++++++ 3 files changed, 52 insertions(+) create mode 100644 python/packages/autogen-core/docs/src/reference/python/autogen_ext.tools.code_execution.rst diff --git a/python/packages/autogen-core/docs/src/reference/index.md b/python/packages/autogen-core/docs/src/reference/index.md index cfe36eded2c2..2742ddbc383e 100644 --- a/python/packages/autogen-core/docs/src/reference/index.md +++ b/python/packages/autogen-core/docs/src/reference/index.md @@ -49,6 +49,7 @@ python/autogen_ext.teams.magentic_one python/autogen_ext.models.openai python/autogen_ext.models.replay python/autogen_ext.tools.langchain +python/autogen_ext.tools.code_execution python/autogen_ext.code_executors.local python/autogen_ext.code_executors.docker python/autogen_ext.code_executors.azure diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.tools.code_execution.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.tools.code_execution.rst new file mode 100644 index 000000000000..18dce6613bfd --- /dev/null +++ b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.tools.code_execution.rst @@ -0,0 +1,8 @@ +autogen\_ext.tools.code\_execution +================================== + + +.. 
automodule:: autogen_ext.tools.code_execution + :members: + :undoc-members: + :show-inheritance: diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py b/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py index 95e8fa34aac8..a0669e5c71fe 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py @@ -18,6 +18,49 @@ def ser_model(self) -> str: class PythonCodeExecutionTool(BaseTool[CodeExecutionInput, CodeExecutionResult]): + """A tool that executes Python code in a code executor and returns output. + + Example executors: + + * :class:`autogen_ext.code_executors.local.LocalCommandLineCodeExecutor` + * :class:`autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` + * :class:`autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor` + + Example usage: + + .. code-block:: bash + + pip install "autogen-agentchat==0.4.0.dev13" "autogen-ext[openai]==0.4.0.dev13" "yfinance" "matplotlib" + + .. code-block:: python + + import asyncio + from autogen_agentchat.agents import AssistantAgent + from autogen_agentchat.ui import Console + from autogen_ext.models.openai import OpenAIChatCompletionClient + from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor + from autogen_ext.tools.code_execution import PythonCodeExecutionTool + + + async def main() -> None: + tool = PythonCodeExecutionTool(LocalCommandLineCodeExecutor(work_dir="coding")) + agent = AssistantAgent( + "assistant", OpenAIChatCompletionClient(model="gpt-4o"), tools=[tool], reflect_on_tool_use=True + ) + await Console( + agent.run_stream( + task="Create a plot of MSFT stock prices in 2024 and save it to a file. Use yfinance and matplotlib." + ) + ) + + + asyncio.run(main()) + + + Args: + executor (CodeExecutor): The code executor that will be used to execute the code blocks. 
+ """ + def __init__(self, executor: CodeExecutor): super().__init__(CodeExecutionInput, CodeExecutionResult, "CodeExecutor", "Execute Python code blocks.") self._executor = executor From d55b8c9044e334c853bb6ee400c2c8209c2748b0 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Mon, 6 Jan 2025 14:13:22 -0800 Subject: [PATCH 02/61] Update index.md for better clarity and relationship between packages (#4896) * Improve index page for better clarity * Add markdown lint check * Update info * Update * typo * tagline * tag line update --- .../packages/autogen-core/docs/src/index.md | 107 +++++++++++------- python/pyproject.toml | 2 +- 2 files changed, 68 insertions(+), 41 deletions(-) diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 1e0c5332afe5..e729679e055c 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -35,7 +35,7 @@ sd_hide_title: true AutoGen

-A framework for building AI agents and multi-agent applications +A framework for building AI agents and applications

@@ -49,42 +49,44 @@ A framework for building AI agents and multi-agent applications :::{grid-item-card} :shadow: none :margin: 2 0 0 0 -:columns: 12 12 12 12 +:columns: 12 12 6 6
-{fas}`people-group;pst-color-primary` -AgentChat
-High-level API that includes preset agents and teams for building multi-agent systems. +{fas}`book;pst-color-primary` +Magentic-One +A multi-agent assistant for web and file-based tasks. +Built on AgentChat. -```sh -pip install "autogen-agentchat==0.4.0.dev13" +```bash +% m1 "Find flights from Seattle to Paris and format the result in a table" ``` -šŸ’” *Start here if you are looking for an API similar to AutoGen 0.2.* - +++ -Get Started -Migration Guide (0.2.x to 0.4.x) +```{button-ref} user-guide/agentchat-user-guide/magentic-one +:color: secondary + +Get Started +``` ::: -:::{grid-item-card} +:::{grid-item-card} {fas}`palette;pst-color-primary` Studio [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogen--studio-blue?logo=pypi)](https://pypi.org/project/autogenstudio/) :shadow: none :margin: 2 0 0 0 -:columns: 12 12 12 12 +:columns: 12 12 6 6 -
+An app for prototyping and managing agents without writing code. +Built on AgentChat. -{fas}`book;pst-color-primary` -Magentic-One
-Magentic-One is a generalist multi-agent system for solving open-ended web and file-based tasks across a variety of domains. +```bash +% autogenstudio ui --port 8080 +``` +++ - -```{button-ref} user-guide/agentchat-user-guide/magentic-one +```{button-ref} user-guide/autogenstudio-user-guide/index :color: secondary Get Started @@ -92,17 +94,41 @@ Get Started ::: - -:::{grid-item-card} {fas}`palette;pst-color-primary` Studio +:::{grid-item-card} :shadow: none :margin: 2 0 0 0 :columns: 12 12 12 12 -No-code platform for authoring and interacting with multi-agent teams. +
+ +{fas}`people-group;pst-color-primary` AgentChat +[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev13/) + +
+A programming framework for building conversational single and multi-agent applications. +Built on Core. + +```python +# pip install "autogen-agentchat==0.4.0.dev13" "autogen-ext[openai]==0.4.0.dev13" "yfinance" "matplotlib" +import asyncio +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.ui import Console +from autogen_ext.models.openai import OpenAIChatCompletionClient +from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor +from autogen_ext.tools.code_execution import PythonCodeExecutionTool + +async def main() -> None: + tool = PythonCodeExecutionTool(LocalCommandLineCodeExecutor(work_dir="coding")) + agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o"), tools=[tool], reflect_on_tool_use=True) + await Console(agent.run_stream(task="Create a plot of MSFT stock prices in 2024 and save it to a file. Use yfinance and matplotlib.")) +asyncio.run(main()) +``` + +_Start here if you are building conversational agents. [Migrating from AutoGen 0.2?](./user-guide/agentchat-user-guide/migration-guide.md)._ +++ -```{button-ref} user-guide/autogenstudio-user-guide/index +```{button-ref} user-guide/agentchat-user-guide/quickstart :color: secondary Get Started @@ -110,16 +136,18 @@ Get Started ::: -:::{grid-item-card} {fas}`cube;pst-color-primary` Core +:::{grid-item-card} {fas}`cube;pst-color-primary` Core [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev13/) :shadow: none :margin: 2 0 0 0 -:columns: 12 12 6 6 +:columns: 12 12 12 12 -Provides building blocks for creating asynchronous, event driven multi-agent systems. +An event-driven programming framework for building scalable multi-agent AI systems. Example scenarios: -```sh -pip install "autogen-core==0.4.0.dev13" -``` +* Deterministic and dynamic agentic workflows for business processes. +* Research on multi-agent collaboration. 
+* Distributed agents for multi-language applications. + +_Start here if you are building workflows or distributed agent systems._ +++ @@ -131,24 +159,23 @@ Get Started ::: -:::{grid-item-card} {fas}`puzzle-piece;pst-color-primary` Extensions +:::{grid-item-card} {fas}`puzzle-piece;pst-color-primary` Extensions [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev13/) :shadow: none :margin: 2 0 0 0 -:columns: 12 12 6 6 +:columns: 12 12 12 12 -Implementations of core components that interface with external services, or use extra dependencies. For example, Docker based code execution. +Implementations of Core and AgentChat components that interface with external services or other libraries. +You can find and use community extensions or create your own. Examples of built-in extensions: -```sh -pip install "autogen-ext==0.4.0.dev13" -``` +* {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter` for using LangChain tools. +* {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent` for using Assistant API. +* {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` for running model-generated code in a Docker container. +* {py:class}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntime` for distributed agents. 
+++ -```{button-ref} user-guide/extensions-user-guide/index -:color: secondary - -Get Started -``` +Discover Community Extensions +Create New Extension ::: diff --git a/python/pyproject.toml b/python/pyproject.toml index fb58777f40a6..59c3e523f16d 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -89,7 +89,7 @@ gen-proto = "python -m grpc_tools.protoc --python_out=./packages/autogen-ext/src gen-proto-samples = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/samples/protos --grpc_python_out=./packages/autogen-core/samples/protos --mypy_out=./packages/autogen-core/samples/protos --mypy_grpc_out=./packages/autogen-core/samples/protos --proto_path ../protos/ agent_events.proto" -markdown-code-lint = "python check_md_code_blocks.py ../README.md ./packages/autogen-core/docs/src/user-guide/agentchat-user-guide/*.md" +markdown-code-lint = "python check_md_code_blocks.py ../README.md ./packages/autogen-core/docs/src/user-guide/agentchat-user-guide/*.md ./packages/autogen-core/docs/src/index.md" [[tool.poe.tasks.gen-test-proto.sequence]] cmd = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/tests/protos --grpc_python_out=./packages/autogen-core/tests/protos --mypy_out=./packages/autogen-core/tests/protos --mypy_grpc_out=./packages/autogen-core/tests/protos --proto_path ./packages/autogen-core/tests/protos serialization_test.proto" From e11fd8353d4042af28c37ee6439966d11a06f94b Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Mon, 6 Jan 2025 14:19:17 -0800 Subject: [PATCH 03/61] Improve markdown code linting command and update logging documentation and (#4902) Update logging documentation and improve markdown code linting command --- .../user-guide/core-user-guide/framework/logging.md | 10 +++------- python/pyproject.toml | 2 +- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/logging.md 
b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/logging.md index 481906ed51cb..b261e8bbe2f1 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/logging.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/logging.md @@ -75,21 +75,17 @@ logger = logging.getLogger(f"{TRACE_LOGGER_NAME}.my_module") ### Emitting structured logs -If your event looks like: +If your event is a dataclass, then it could be emitted in code like this: ```python +import logging from dataclasses import dataclass +from autogen_core import EVENT_LOGGER_NAME @dataclass class MyEvent: timestamp: str message: str -``` - -Then it could be emitted in code like this: - -```python -from autogen_core import EVENT_LOGGER_NAME logger = logging.getLogger(EVENT_LOGGER_NAME + ".my_module") logger.info(MyEvent("timestamp", "message")) diff --git a/python/pyproject.toml b/python/pyproject.toml index 59c3e523f16d..da884484083a 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -89,7 +89,7 @@ gen-proto = "python -m grpc_tools.protoc --python_out=./packages/autogen-ext/src gen-proto-samples = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/samples/protos --grpc_python_out=./packages/autogen-core/samples/protos --mypy_out=./packages/autogen-core/samples/protos --mypy_grpc_out=./packages/autogen-core/samples/protos --proto_path ../protos/ agent_events.proto" -markdown-code-lint = "python check_md_code_blocks.py ../README.md ./packages/autogen-core/docs/src/user-guide/agentchat-user-guide/*.md ./packages/autogen-core/docs/src/index.md" +markdown-code-lint = """python check_md_code_blocks.py ../README.md ./packages/autogen-core/docs/src/**/*.md""" [[tool.poe.tasks.gen-test-proto.sequence]] cmd = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/tests/protos --grpc_python_out=./packages/autogen-core/tests/protos --mypy_out=./packages/autogen-core/tests/protos 
--mypy_grpc_out=./packages/autogen-core/tests/protos --proto_path ./packages/autogen-core/tests/protos serialization_test.proto" From a6612e683d0feeb9bcffb4e6b25739a262bbe530 Mon Sep 17 00:00:00 2001 From: afourney Date: Mon, 6 Jan 2025 16:54:14 -0800 Subject: [PATCH 04/61] Added tests for FileSurfer. (#4913) --- .../tests/test_filesurfer_agent.py | 147 +++++ python/uv.lock | 611 +++++++++--------- 2 files changed, 442 insertions(+), 316 deletions(-) create mode 100644 python/packages/autogen-ext/tests/test_filesurfer_agent.py diff --git a/python/packages/autogen-ext/tests/test_filesurfer_agent.py b/python/packages/autogen-ext/tests/test_filesurfer_agent.py new file mode 100644 index 000000000000..513a1fe8444c --- /dev/null +++ b/python/packages/autogen-ext/tests/test_filesurfer_agent.py @@ -0,0 +1,147 @@ +import asyncio +import json +import logging +import os +from datetime import datetime +from typing import Any, AsyncGenerator, List + +import aiofiles +import pytest +from autogen_agentchat import EVENT_LOGGER_NAME +from autogen_ext.agents.file_surfer import FileSurfer +from autogen_ext.models.openai import OpenAIChatCompletionClient +from openai.resources.chat.completions import AsyncCompletions +from openai.types.chat.chat_completion import ChatCompletion, Choice +from openai.types.chat.chat_completion_chunk import ChatCompletionChunk +from openai.types.chat.chat_completion_message import ChatCompletionMessage +from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall, Function +from openai.types.completion_usage import CompletionUsage +from pydantic import BaseModel + + +class FileLogHandler(logging.Handler): + def __init__(self, filename: str) -> None: + super().__init__() + self.filename = filename + self.file_handler = logging.FileHandler(filename) + + def emit(self, record: logging.LogRecord) -> None: + ts = datetime.fromtimestamp(record.created).isoformat() + if isinstance(record.msg, BaseModel): + record.msg = 
json.dumps( + { + "timestamp": ts, + "message": record.msg.model_dump(), + "type": record.msg.__class__.__name__, + }, + ) + self.file_handler.emit(record) + + +class _MockChatCompletion: + def __init__(self, chat_completions: List[ChatCompletion]) -> None: + self._saved_chat_completions = chat_completions + self._curr_index = 0 + + async def mock_create( + self, *args: Any, **kwargs: Any + ) -> ChatCompletion | AsyncGenerator[ChatCompletionChunk, None]: + await asyncio.sleep(0.1) + completion = self._saved_chat_completions[self._curr_index] + self._curr_index += 1 + return completion + + +logger = logging.getLogger(EVENT_LOGGER_NAME) +logger.setLevel(logging.DEBUG) +logger.addHandler(FileLogHandler("test_filesurfer_agent.log")) + + +@pytest.mark.asyncio +async def test_run_filesurfer(monkeypatch: pytest.MonkeyPatch) -> None: + # Create a test file + test_file = os.path.abspath("test_filesurfer_agent.html") + async with aiofiles.open(test_file, "wt") as file: + await file.write(""" + + FileSurfer test file + + +

FileSurfer test H1

+

FileSurfer test body

+ +""") + + # Mock the API calls + model = "gpt-4o-2024-05-13" + chat_completions = [ + ChatCompletion( + id="id1", + choices=[ + Choice( + finish_reason="tool_calls", + index=0, + message=ChatCompletionMessage( + content=None, + tool_calls=[ + ChatCompletionMessageToolCall( + id="1", + type="function", + function=Function( + name="open_path", + arguments=json.dumps({"path": test_file}), + ), + ) + ], + role="assistant", + ), + ) + ], + created=0, + model=model, + object="chat.completion", + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), + ), + ChatCompletion( + id="id2", + choices=[ + Choice( + finish_reason="tool_calls", + index=0, + message=ChatCompletionMessage( + content=None, + tool_calls=[ + ChatCompletionMessageToolCall( + id="1", + type="function", + function=Function( + name="open_path", + arguments=json.dumps({"path": os.path.dirname(test_file)}), + ), + ) + ], + role="assistant", + ), + ) + ], + created=0, + model=model, + object="chat.completion", + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), + ), + ] + mock = _MockChatCompletion(chat_completions) + monkeypatch.setattr(AsyncCompletions, "create", mock.mock_create) + agent = FileSurfer( + "FileSurfer", + model_client=OpenAIChatCompletionClient(model=model, api_key=""), + ) + + # Get the FileSurfer to read the file, and the directory + assert agent._name == "FileSurfer" # pyright: ignore[reportPrivateUsage] + result = await agent.run(task="Please read the test file") + assert "# FileSurfer test H1" in result.messages[1].content + + result = await agent.run(task="Please read the test directory") + assert "# Index of " in result.messages[1].content + assert "test_filesurfer_agent.html" in result.messages[1].content diff --git a/python/uv.lock b/python/uv.lock index 067520cb3e8f..5df8962713cb 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -2,6 +2,7 @@ version = 1 requires-python = ">=3.10, <3.13" resolution-markers = [ 
"python_full_version < '3.11' and sys_platform == 'darwin'", + "python_version < '0'", "python_full_version < '3.11' and platform_machine == 'aarch64' and sys_platform == 'linux'", "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')", "python_full_version == '3.11.*' and sys_platform == 'darwin'", @@ -11,7 +12,6 @@ resolution-markers = [ "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and sys_platform == 'linux'", "(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and sys_platform != 'darwin' and sys_platform != 'linux')", "python_full_version >= '3.12.4' and sys_platform == 'darwin'", - "python_version < '0'", "python_full_version >= '3.12.4' and platform_machine == 'aarch64' and sys_platform == 'linux'", "(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12.4' and sys_platform != 'darwin' and sys_platform != 'linux')", ] @@ -284,14 +284,11 @@ wheels = [ [[package]] name = "asttokens" -version = "2.4.1" +version = "3.0.0" source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "six" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/45/1d/f03bcb60c4a3212e15f99a56085d93093a497718adf828d050b9d675da81/asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0", size = 62284 } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/45/86/4736ac618d82a20d87d2f92ae19441ebc7ac9e7a581d7e58bbe79233b24a/asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24", size = 27764 }, + { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, ] [[package]] @@ -1089,23 +1086,23 @@ wheels = [ [[package]] name = "debugpy" -version = "1.8.7" +version = "1.8.11" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6d/00/5a8b5dc8f52617c5e41845e26290ebea1ba06377cc08155b6d245c27b386/debugpy-1.8.7.zip", hash = "sha256:18b8f731ed3e2e1df8e9cdaa23fb1fc9c24e570cd0081625308ec51c82efe42e", size = 4957835 } +sdist = { url = "https://files.pythonhosted.org/packages/bc/e7/666f4c9b0e24796af50aadc28d36d21c2e01e831a934535f956e09b3650c/debugpy-1.8.11.tar.gz", hash = "sha256:6ad2688b69235c43b020e04fecccdf6a96c8943ca9c2fb340b8adc103c655e57", size = 1640124 } wheels = [ - { url = "https://files.pythonhosted.org/packages/46/50/1850a5a0cab6f65a21e452166ec60bac5f8a995184d17e18bb9dc3789c72/debugpy-1.8.7-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95fe04a573b8b22896c404365e03f4eda0ce0ba135b7667a1e57bd079793b96b", size = 2090182 }, - { url = "https://files.pythonhosted.org/packages/87/51/ef4d5c55c06689b377678bdee870e3df8eb2a3d9cf0e618b4d7255413c8a/debugpy-1.8.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:628a11f4b295ffb4141d8242a9bb52b77ad4a63a2ad19217a93be0f77f2c28c9", size = 3547569 }, - { url = "https://files.pythonhosted.org/packages/eb/df/a4ea1f95022f93522b59b71ec42d6703abe3e0bee753070118816555fee9/debugpy-1.8.7-cp310-cp310-win32.whl", hash = 
"sha256:85ce9c1d0eebf622f86cc68618ad64bf66c4fc3197d88f74bb695a416837dd55", size = 5153144 }, - { url = "https://files.pythonhosted.org/packages/47/f7/912408b69e83659bd62fa29ebb7984efe81aed4f5e08bfe10e31a1dc3c3a/debugpy-1.8.7-cp310-cp310-win_amd64.whl", hash = "sha256:29e1571c276d643757ea126d014abda081eb5ea4c851628b33de0c2b6245b037", size = 5185605 }, - { url = "https://files.pythonhosted.org/packages/f6/0a/4a4516ef4c07891542cb25620085507cab3c6b23a42b5630c17788fff83e/debugpy-1.8.7-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:caf528ff9e7308b74a1749c183d6808ffbedbb9fb6af78b033c28974d9b8831f", size = 2204794 }, - { url = "https://files.pythonhosted.org/packages/46/6f/2bb0bba20b8b74b7c341379dd99275cf6aa7722c1948fa99728716aad1b9/debugpy-1.8.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cba1d078cf2e1e0b8402e6bda528bf8fda7ccd158c3dba6c012b7897747c41a0", size = 3122160 }, - { url = "https://files.pythonhosted.org/packages/c0/ce/833351375cef971f0caa63fa82adf3f6949ad85410813026a4a436083a71/debugpy-1.8.7-cp311-cp311-win32.whl", hash = "sha256:171899588bcd412151e593bd40d9907133a7622cd6ecdbdb75f89d1551df13c2", size = 5078675 }, - { url = "https://files.pythonhosted.org/packages/7d/e1/e9ac2d546143a4defbaa2e609e173c912fb989cdfb5385c9771770a6bf5c/debugpy-1.8.7-cp311-cp311-win_amd64.whl", hash = "sha256:6e1c4ffb0c79f66e89dfd97944f335880f0d50ad29525dc792785384923e2211", size = 5102927 }, - { url = "https://files.pythonhosted.org/packages/59/4b/9f52ca1a799601a10cd2673503658bd8c8ecc4a7a43302ee29cf062474ec/debugpy-1.8.7-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:4d27d842311353ede0ad572600c62e4bcd74f458ee01ab0dd3a1a4457e7e3706", size = 2529803 }, - { url = "https://files.pythonhosted.org/packages/80/79/8bba39190d2ea17840925d287f1c6c3a7c60b58f5090444e9ecf176c540f/debugpy-1.8.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:703c1fd62ae0356e194f3e7b7a92acd931f71fe81c4b3be2c17a7b8a4b546ec2", size = 4170911 }, - { url = "https://files.pythonhosted.org/packages/3b/19/5b3d312936db8eb281310fa27903459328ed722d845d594ba5feaeb2f0b3/debugpy-1.8.7-cp312-cp312-win32.whl", hash = "sha256:2f729228430ef191c1e4df72a75ac94e9bf77413ce5f3f900018712c9da0aaca", size = 5195476 }, - { url = "https://files.pythonhosted.org/packages/9f/49/ad20b29f8c921fd5124530d3d39b8f2077efd51b71339a2eff02bba693e9/debugpy-1.8.7-cp312-cp312-win_amd64.whl", hash = "sha256:45c30aaefb3e1975e8a0258f5bbd26cd40cde9bfe71e9e5a7ac82e79bad64e39", size = 5235031 }, - { url = "https://files.pythonhosted.org/packages/51/b1/a0866521c71a6ae3d3ca320e74835163a4671b1367ba360a55a0a51e5a91/debugpy-1.8.7-py2.py3-none-any.whl", hash = "sha256:57b00de1c8d2c84a61b90880f7e5b6deaf4c312ecbde3a0e8912f2a56c4ac9ae", size = 5210683 }, + { url = "https://files.pythonhosted.org/packages/26/e6/4cf7422eaa591b4c7d6a9fde224095dac25283fdd99d90164f28714242b0/debugpy-1.8.11-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:2b26fefc4e31ff85593d68b9022e35e8925714a10ab4858fb1b577a8a48cb8cd", size = 2075100 }, + { url = "https://files.pythonhosted.org/packages/83/3a/e163de1df5995d95760a4d748b02fbefb1c1bf19e915b664017c40435dbf/debugpy-1.8.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61bc8b3b265e6949855300e84dc93d02d7a3a637f2aec6d382afd4ceb9120c9f", size = 3559724 }, + { url = "https://files.pythonhosted.org/packages/27/6c/327e19fd1bf428a1efe1a6f97b306689c54c2cebcf871b66674ead718756/debugpy-1.8.11-cp310-cp310-win32.whl", hash = "sha256:c928bbf47f65288574b78518449edaa46c82572d340e2750889bbf8cd92f3737", size = 5178068 }, + { url = "https://files.pythonhosted.org/packages/49/80/359ff8aa388f0bd4a48f0fa9ce3606396d576657ac149c6fba3cc7de8adb/debugpy-1.8.11-cp310-cp310-win_amd64.whl", hash = "sha256:8da1db4ca4f22583e834dcabdc7832e56fe16275253ee53ba66627b86e304da1", size = 5210109 }, + { url = 
"https://files.pythonhosted.org/packages/7c/58/8e3f7ec86c1b7985a232667b5df8f3b1b1c8401028d8f4d75e025c9556cd/debugpy-1.8.11-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:85de8474ad53ad546ff1c7c7c89230db215b9b8a02754d41cb5a76f70d0be296", size = 2173656 }, + { url = "https://files.pythonhosted.org/packages/d2/03/95738a68ade2358e5a4d63a2fd8e7ed9ad911001cfabbbb33a7f81343945/debugpy-1.8.11-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ffc382e4afa4aee367bf413f55ed17bd91b191dcaf979890af239dda435f2a1", size = 3132464 }, + { url = "https://files.pythonhosted.org/packages/ca/f4/18204891ab67300950615a6ad09b9de236203a9138f52b3b596fa17628ca/debugpy-1.8.11-cp311-cp311-win32.whl", hash = "sha256:40499a9979c55f72f4eb2fc38695419546b62594f8af194b879d2a18439c97a9", size = 5103637 }, + { url = "https://files.pythonhosted.org/packages/3b/90/3775e301cfa573b51eb8a108285681f43f5441dc4c3916feed9f386ef861/debugpy-1.8.11-cp311-cp311-win_amd64.whl", hash = "sha256:987bce16e86efa86f747d5151c54e91b3c1e36acc03ce1ddb50f9d09d16ded0e", size = 5127862 }, + { url = "https://files.pythonhosted.org/packages/c6/ae/2cf26f3111e9d94384d9c01e9d6170188b0aeda15b60a4ac6457f7c8a26f/debugpy-1.8.11-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:84e511a7545d11683d32cdb8f809ef63fc17ea2a00455cc62d0a4dbb4ed1c308", size = 2498756 }, + { url = "https://files.pythonhosted.org/packages/b0/16/ec551789d547541a46831a19aa15c147741133da188e7e6acf77510545a7/debugpy-1.8.11-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce291a5aca4985d82875d6779f61375e959208cdf09fcec40001e65fb0a54768", size = 4219136 }, + { url = "https://files.pythonhosted.org/packages/72/6f/b2b3ce673c55f882d27a6eb04a5f0c68bcad6b742ac08a86d8392ae58030/debugpy-1.8.11-cp312-cp312-win32.whl", hash = "sha256:28e45b3f827d3bf2592f3cf7ae63282e859f3259db44ed2b129093ca0ac7940b", size = 5224440 }, + { url = 
"https://files.pythonhosted.org/packages/77/09/b1f05be802c1caef5b3efc042fc6a7cadd13d8118b072afd04a9b9e91e06/debugpy-1.8.11-cp312-cp312-win_amd64.whl", hash = "sha256:44b1b8e6253bceada11f714acf4309ffb98bfa9ac55e4fce14f9e5d4484287a1", size = 5264578 }, + { url = "https://files.pythonhosted.org/packages/77/0a/d29a5aacf47b4383ed569b8478c02d59ee3a01ad91224d2cff8562410e43/debugpy-1.8.11-py2.py3-none-any.whl", hash = "sha256:0e22f846f4211383e6a416d04b4c13ed174d24cc5d43f5fd52e7821d0ebc8920", size = 5226874 }, ] [[package]] @@ -1222,11 +1219,11 @@ wheels = [ [[package]] name = "fastjsonschema" -version = "2.20.0" +version = "2.21.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/03/3f/3ad5e7be13b4b8b55f4477141885ab2364f65d5f6ad5f7a9daffd634d066/fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23", size = 373056 } +sdist = { url = "https://files.pythonhosted.org/packages/8b/50/4b769ce1ac4071a1ef6d86b1a3fb56cdc3a37615e8c5519e1af96cdac366/fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4", size = 373939 } wheels = [ - { url = "https://files.pythonhosted.org/packages/6d/ca/086311cdfc017ec964b2436fe0c98c1f4efcb7e4c328956a22456e497655/fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a", size = 23543 }, + { url = "https://files.pythonhosted.org/packages/90/2b/0817a2b257fe88725c25589d89aec060581aabf668707a8d03b2e9e0cb2a/fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667", size = 23924 }, ] [[package]] @@ -1273,6 +1270,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", 
size = 16163 }, ] +[[package]] +name = "filetype" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/29/745f7d30d47fe0f251d3ad3dc2978a23141917661998763bebb6da007eb1/filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb", size = 998020 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970 }, +] + [[package]] name = "frozenlist" version = "1.5.0" @@ -1588,7 +1594,7 @@ wheels = [ [[package]] name = "ipython" -version = "8.29.0" +version = "8.31.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -1603,21 +1609,21 @@ dependencies = [ { name = "traitlets" }, { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/85/e0/a3f36dde97e12121106807d80485423ae4c5b27ce60d40d4ab0bab18a9db/ipython-8.29.0.tar.gz", hash = "sha256:40b60e15b22591450eef73e40a027cf77bd652e757523eebc5bd7c7c498290eb", size = 5497513 } +sdist = { url = "https://files.pythonhosted.org/packages/01/35/6f90fdddff7a08b7b715fccbd2427b5212c9525cd043d26fdc45bee0708d/ipython-8.31.0.tar.gz", hash = "sha256:b6a2274606bec6166405ff05e54932ed6e5cfecaca1fc05f2cacde7bb074d70b", size = 5501011 } wheels = [ - { url = "https://files.pythonhosted.org/packages/c5/a5/c15ed187f1b3fac445bb42a2dedd8dec1eee1718b35129242049a13a962f/ipython-8.29.0-py3-none-any.whl", hash = "sha256:0188a1bd83267192123ccea7f4a8ed0a78910535dbaa3f37671dca76ebd429c8", size = 819911 }, + { url = "https://files.pythonhosted.org/packages/04/60/d0feb6b6d9fe4ab89fe8fe5b47cbf6cd936bfd9f1e7ffa9d0015425aeed6/ipython-8.31.0-py3-none-any.whl", hash = 
"sha256:46ec58f8d3d076a61d128fe517a51eb730e3aaf0c184ea8c17d16e366660c6a6", size = 821583 }, ] [[package]] name = "jedi" -version = "0.19.1" +version = "0.19.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "parso" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d6/99/99b493cec4bf43176b678de30f81ed003fd6a647a301b9c927280c600f0a/jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd", size = 1227821 } +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/9f/bc63f0f0737ad7a60800bfd472a4836661adae21f9c2535f3957b1e54ceb/jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0", size = 1569361 }, + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, ] [[package]] @@ -1750,7 +1756,7 @@ wheels = [ [[package]] name = "jupyter-cache" -version = "1.0.0" +version = "1.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -1762,9 +1768,9 @@ dependencies = [ { name = "sqlalchemy" }, { name = "tabulate" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1b/c1/1276395b634228946bca7da7cf410d133511d150524d2a60b06028a860fe/jupyter_cache-1.0.0.tar.gz", hash = "sha256:d0fa7d7533cd5798198d8889318269a8c1382ed3b22f622c09a9356521f48687", size = 31968 } +sdist = { url = "https://files.pythonhosted.org/packages/bb/f7/3627358075f183956e8c4974603232b03afd4ddc7baf72c2bc9fff522291/jupyter_cache-1.0.1.tar.gz", hash = 
"sha256:16e808eb19e3fb67a223db906e131ea6e01f03aa27f49a7214ce6a5fec186fb9", size = 32048 } wheels = [ - { url = "https://files.pythonhosted.org/packages/f8/2f/0bb8eacdd1102a20fecc759fb8ace695b9a1048563499a6dff8fa8da32a7/jupyter_cache-1.0.0-py3-none-any.whl", hash = "sha256:594b1c4e29b488b36547e12477645f489dbdc62cc939b2408df5679f79245078", size = 33875 }, + { url = "https://files.pythonhosted.org/packages/64/6b/67b87da9d36bff9df7d0efbd1a325fa372a43be7158effaf43ed7b22341d/jupyter_cache-1.0.1-py3-none-any.whl", hash = "sha256:9c3cafd825ba7da8b5830485343091143dff903e4d8c69db9349b728b140abf6", size = 33907 }, ] [[package]] @@ -1875,16 +1881,16 @@ wheels = [ [[package]] name = "langchain-openai" -version = "0.2.3" +version = "0.2.14" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, { name = "openai" }, { name = "tiktoken" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/41/31/82c8a33354dd0a59438973cfdfc771fde0df2c9fb8388e0c23dc36119959/langchain_openai-0.2.3.tar.gz", hash = "sha256:e142031704de1104735f503f76352c53b27ac0a2806466392993c4508c42bf0c", size = 42572 } +sdist = { url = "https://files.pythonhosted.org/packages/e5/fd/8256eba9a159f95a13c5bf7f1f49683de93b3876585b768e6be5dc3a5765/langchain_openai-0.2.14.tar.gz", hash = "sha256:7a514f309e356b182a337c0ed36ab3fbe34d9834a235a3b85cb7f91ae775d978", size = 43647 } wheels = [ - { url = "https://files.pythonhosted.org/packages/66/ea/dcc59d9b818a4d7f25d4d6b3018355a0e0243a351b1d4ef8b26ec107ee00/langchain_openai-0.2.3-py3-none-any.whl", hash = "sha256:f498c94817c980cb302439b95d3f3275cdf2743e022ee674692c75898523cf57", size = 49907 }, + { url = "https://files.pythonhosted.org/packages/ed/54/63c8264d7dbc3bf31ba61bf97740fdd76386b2d4f9a58f58afd3961ce7d7/langchain_openai-0.2.14-py3-none-any.whl", hash = "sha256:d232496662f79ece9a11caf7d798ba863e559c771bc366814f7688e0fe664fe8", size = 50876 }, ] [[package]] @@ -1901,43 +1907,42 @@ wheels = [ [[package]] name = "langgraph" 
-version = "0.2.39" +version = "0.2.61" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, { name = "langgraph-checkpoint" }, { name = "langgraph-sdk" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2c/ba/eb6da29e5c4608191c00cb9ad1dbfb0686a02441ff52f16c216e6bd2d823/langgraph-0.2.39.tar.gz", hash = "sha256:32af60291f9260c3acb8a3d4bec99e32abd89ddb6b4a10a79aa3dbc90fa920ac", size = 94566 } +sdist = { url = "https://files.pythonhosted.org/packages/34/9b/4138fb378cdca747c719015066303532e8be597318d29f7e1b75d6803578/langgraph-0.2.61.tar.gz", hash = "sha256:355aa6b6b3c19505e8b3c5f7bd813ffd59130d48e6aa017c0edf08273f4f42c2", size = 118141 } wheels = [ - { url = "https://files.pythonhosted.org/packages/72/67/ffad3820b879aa7f6f6956d4499d59017e51e944addaf6121dd3fc06eb21/langgraph-0.2.39-py3-none-any.whl", hash = "sha256:5dfbdeefbf599f16d245799609f2b43c1ec7a7e8ed6e1d7981b1a7979a4ad7fe", size = 113522 }, + { url = "https://files.pythonhosted.org/packages/c5/d8/724cafff83ea749e6eb0ae48103324675e9308a84c4153947028ea76c8e7/langgraph-0.2.61-py3-none-any.whl", hash = "sha256:615f8bd345bf3a6ac3374ce7b4ecc96549301e598eb0f3e933c73b96af229961", size = 137204 }, ] [[package]] name = "langgraph-checkpoint" -version = "2.0.2" +version = "2.0.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, { name = "msgpack" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/22/49/8596e8f51299fd035515704da305b77965f937cb2175277a7740dde7e492/langgraph_checkpoint-2.0.2.tar.gz", hash = "sha256:c1d033e4e4855f580fa56830327eb86513b64ab5be527245363498e76b19a0b9", size = 20362 } +sdist = { url = "https://files.pythonhosted.org/packages/61/64/ac32516240491107d43b62f83313b320a4d655314badb4ef3f35efc38025/langgraph_checkpoint-2.0.9.tar.gz", hash = "sha256:43847d7e385a2d9d2b684155920998e44ed42d2d1780719e4f6111fe3d6db84c", size = 33299 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/d7/8e/78872f64d4b37dddc5ab06ad214a74dab4d56140b09169ae86881392328c/langgraph_checkpoint-2.0.2-py3-none-any.whl", hash = "sha256:6e5dfd90e1fc71b91ccff75939ada1114e5d7f824df5f24c62d39bed69039ee2", size = 23349 }, + { url = "https://files.pythonhosted.org/packages/d8/63/b2ecb322ffc978e6bcf27e3786a0efa3142c57d58daeb4e4397196117030/langgraph_checkpoint-2.0.9-py3-none-any.whl", hash = "sha256:b546ed6129929b8941ac08af6ce5cd26c8ebe1d25883d3c48638d34ade91ce42", size = 37318 }, ] [[package]] name = "langgraph-sdk" -version = "0.1.34" +version = "0.1.48" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, - { name = "httpx-sse" }, { name = "orjson" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9a/2c/6d0afafac86b3c59726c2b845b392147160f1647221ddbc4f8e648dc8939/langgraph_sdk-0.1.34.tar.gz", hash = "sha256:ee76507018414a08bcf63e0de916e956340ee2e9b5c60d5252d1b2b1fe47c5f3", size = 27625 } +sdist = { url = "https://files.pythonhosted.org/packages/d7/dc/86fd8e3f7f777c7ebfcbcec4bc433a0d4ebc6135e6638d7d856eed60475d/langgraph_sdk-0.1.48.tar.gz", hash = "sha256:a14ddfc0ea444b33fb5a7357ca0359d2d77961b61244a028dd952658d4d6bc92", size = 40536 } wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/00/454f94ab2754392f5cb78bf8f8ea005ae1cfef17d257103126d19e3968c7/langgraph_sdk-0.1.34-py3-none-any.whl", hash = "sha256:3c44967382e073055c1731d9dde004a49ca04a063183747031b8a8286bad0b19", size = 28439 }, + { url = "https://files.pythonhosted.org/packages/b3/88/95ca5e3ca12659c2d2c26d64c5e481f1fca28b3053e15f5f0aafb3cc5244/langgraph_sdk-0.1.48-py3-none-any.whl", hash = "sha256:2c6c01d48e8eddff7e9688bdbda943ddb107831972667eb08a0c30e3a937fa11", size = 43654 }, ] [[package]] @@ -1970,20 +1975,21 @@ wheels = [ [[package]] name = "llama-cloud" -version = "0.1.4" +version = "0.1.7" source = { registry = "https://pypi.org/simple" } dependencies = [ + { name = "certifi" }, { name = "httpx" }, { name = 
"pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b4/da/4d98e8b07356722377c9921ca0ae2ebb91403dfa46f1520c1282c4c562b6/llama_cloud-0.1.4.tar.gz", hash = "sha256:6f0155979bd96160951cb812c48836f1face037bc79ccfd8d185b18ef4c9faf8", size = 65003 } +sdist = { url = "https://files.pythonhosted.org/packages/cb/7f/aba0fadc436a2d8c00116795f2d69317b1a09331eb2b44753c169828a1a7/llama_cloud-0.1.7.tar.gz", hash = "sha256:7c1767cb209905400e894566661a91230bcff83cd4d9c08e782fd2143ca6a646", size = 88868 } wheels = [ - { url = "https://files.pythonhosted.org/packages/e2/c8/550908552364cf77c835f1027c619fc37a12256c896348cce5a71dabcf5e/llama_cloud-0.1.4-py3-none-any.whl", hash = "sha256:cfca6c4e0a87468b922d732f0f313a2ecd3a8e0bf74382ee80829ce49dcbc5e0", size = 176822 }, + { url = "https://files.pythonhosted.org/packages/48/24/5f8064c878188bcd93c8948cefbb8924dceb4ec1bc0977048597c621cfa2/llama_cloud-0.1.7-py3-none-any.whl", hash = "sha256:266db22939c537a2b802eea6a9af2701beff98d5ba46513248011a4f1c17afc6", size = 242090 }, ] [[package]] name = "llama-index" -version = "0.11.20" +version = "0.12.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-agent-openai" }, @@ -1991,7 +1997,6 @@ dependencies = [ { name = "llama-index-core" }, { name = "llama-index-embeddings-openai" }, { name = "llama-index-indices-managed-llama-cloud" }, - { name = "llama-index-legacy" }, { name = "llama-index-llms-openai" }, { name = "llama-index-multi-modal-llms-openai" }, { name = "llama-index-program-openai" }, @@ -2000,48 +2005,49 @@ dependencies = [ { name = "llama-index-readers-llama-parse" }, { name = "nltk" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c1/37/b97d994212393f302b87f41783615bc38ce054c702829c6962ff8c1de8c4/llama_index-0.11.20.tar.gz", hash = "sha256:5e8e3fcb5af5b4e4525498b075ff0a54160b00bf0fc0b83801fc7faf1c8a8c1d", size = 7785 } +sdist = { url = 
"https://files.pythonhosted.org/packages/d6/94/0ec5ebe733886cf481b23d3e425479e6c0c9850c409b2244285f3ae4a6eb/llama_index-0.12.9.tar.gz", hash = "sha256:2f8d671e6ca7e5b33b0f5cbddef8c0a11eb1e39781f1be65e9bd0c4a7a0deb5b", size = 7824 } wheels = [ - { url = "https://files.pythonhosted.org/packages/63/c4/2ea55ee0dba1b86cfaaa54cf0311294714ce12309db389b50bf8c2ecd2ee/llama_index-0.11.20-py3-none-any.whl", hash = "sha256:fc9e5e47e6da3610bc3b788d208bb782c03a342fd71e3b22b37abc83ecebe46e", size = 6819 }, + { url = "https://files.pythonhosted.org/packages/7c/b0/6b726880becc72f94526422f1546f67c80f592c62e1106b49eb99b7f4b21/llama_index-0.12.9-py3-none-any.whl", hash = "sha256:95c39d8055c7d19bd5f099560b53c0971ae9997ebe46f7438766189ed48e4456", size = 6790 }, ] [[package]] name = "llama-index-agent-openai" -version = "0.3.4" +version = "0.4.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "llama-index-llms-openai" }, { name = "openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ef/b2/95121d8ea4da363dabdacca7b340dfae4353b099a6b6b910de948f9684af/llama_index_agent_openai-0.3.4.tar.gz", hash = "sha256:80e3408d97121bebca3fa3ffd14b51285870c1c3c73d4ee04d3d18cfe6040466", size = 10401 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/ea/01144dab025f85f8ff5a3c1ad92dbcd8e48e7d85218c98b486af6a4a3814/llama_index_agent_openai-0.4.1.tar.gz", hash = "sha256:3a89137b228a6e9c2b3f46e367a27b75fb31b458e21777bba819de654707d59e", size = 10595 } wheels = [ - { url = "https://files.pythonhosted.org/packages/9d/69/69857756c139897f209a2c372380509f718fb147170e2f2287cf4d77314a/llama_index_agent_openai-0.3.4-py3-none-any.whl", hash = "sha256:3720ce9bb12417a99a3fe84e52cce23e762b13f88a2dfc4292c76f4df9b26b4a", size = 13036 }, + { url = "https://files.pythonhosted.org/packages/09/28/1f8742ad23d993720eade463b5f3c0f00d643ff03c1a01596e45ae5a16df/llama_index_agent_openai-0.4.1-py3-none-any.whl", hash = 
"sha256:162507543082f739a8c806911344c8d7f2434d0ee91124cfdd7b0ba5f76d0e57", size = 13184 }, ] [[package]] name = "llama-index-cli" -version = "0.3.1" +version = "0.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "llama-index-embeddings-openai" }, { name = "llama-index-llms-openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5a/d2/5894ccc0f86c4e95d557c8c7ef0c15d19c67e0ad3d4628247684350c7363/llama_index_cli-0.3.1.tar.gz", hash = "sha256:1890dd687cf440f3651365a549e303363162c167b8efbd87a3aa10058d6d5c77", size = 24450 } +sdist = { url = "https://files.pythonhosted.org/packages/0a/52/81e1448d4dcff5beb1453f397f34f9ac769b7fcdb6b7c8fbd4c20b73e836/llama_index_cli-0.4.0.tar.gz", hash = "sha256:d6ab201359962a8a34368aeda3a49bbbe67e9e009c59bd925c4fb2be4ace3906", size = 24710 } wheels = [ - { url = "https://files.pythonhosted.org/packages/28/58/fb9d85d8f29d7379e953caf50278e095d302231a508d3e46dafd3a4bea1e/llama_index_cli-0.3.1-py3-none-any.whl", hash = "sha256:2111fbb6973f5b1eabce0d6cca3986499f0f2f625b13d7f48269a49c64c027d4", size = 27767 }, + { url = "https://files.pythonhosted.org/packages/70/29/2b659e5930ea44253bf99e2afc395daaa2a3edaa579d99e63ea53df03313/llama_index_cli-0.4.0-py3-none-any.whl", hash = "sha256:60d12f89e6b85e80a0cc3a8b531f05a911b5eebaebc37314411476d1ba685904", size = 27785 }, ] [[package]] name = "llama-index-core" -version = "0.11.20" +version = "0.12.10.post1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, { name = "dataclasses-json" }, { name = "deprecated" }, { name = "dirtyjson" }, + { name = "filetype" }, { name = "fsspec" }, { name = "httpx" }, { name = "nest-asyncio" }, @@ -2060,83 +2066,54 @@ dependencies = [ { name = "typing-inspect" }, { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/07/68/22bb16497be9556322f78f001742f0d3e8e847b007c5896c1da09dc2b27c/llama_index_core-0.11.20.tar.gz", hash = 
"sha256:6b5eaaf4be5030808b9ba953e8f7aead7ba495b8e72ba0a81dfc7dda96be416f", size = 1323570 } +sdist = { url = "https://files.pythonhosted.org/packages/f9/0b/945ed217995e1ab685eba9a35b530d50b9f5853af88446acaff0ce4f89bb/llama_index_core-0.12.10.post1.tar.gz", hash = "sha256:af27bea4d1494ba84983a649976e60e3de677a73946aa45ed12ce27e3a623ddf", size = 1330881 } wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/ee/3bd7a6037d90c50e8b5600a1d975e7a70e309262952e0097ef74d015c173/llama_index_core-0.11.20-py3-none-any.whl", hash = "sha256:e84daf45e90e4b5d9e135baf40ab9853a1c3169a1076af6d58739d098e70adb1", size = 1574327 }, + { url = "https://files.pythonhosted.org/packages/3f/04/e2bad3ebef965dd0650544cdd869befe63fd02b275a0867f04e15964a0a9/llama_index_core-0.12.10.post1-py3-none-any.whl", hash = "sha256:897e8cd4efeff6842580b043bdf4008ac60f693df1de2bfd975307a4845707c2", size = 1583668 }, ] [[package]] name = "llama-index-embeddings-azure-openai" -version = "0.2.5" +version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "llama-index-embeddings-openai" }, { name = "llama-index-llms-azure-openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b2/b9/fa9c729c001e062069d3a20b7f03a577187eeb0fb4272fb32fcde59e7bba/llama_index_embeddings_azure_openai-0.2.5.tar.gz", hash = "sha256:d8b2e3134c2b3510214f2260e6c17be18396d0c765f3edd6c3ffe6109528aed0", size = 3053 } +sdist = { url = "https://files.pythonhosted.org/packages/48/db/a35c34ff7863315ac133b4ff0386913cbe9986988e7f1c076e1745dbe015/llama_index_embeddings_azure_openai-0.3.0.tar.gz", hash = "sha256:80b0cf977d8b967a08536d65b8e2d0c6c966eeaf1b8fff084e97f3081fd70c34", size = 3111 } wheels = [ - { url = "https://files.pythonhosted.org/packages/6e/aa/73aafa3bb97d2ba62a5af1ec86b1c5fcb0619ee16101e765e734c4eebf7e/llama_index_embeddings_azure_openai-0.2.5-py3-none-any.whl", hash = "sha256:e3384002618d027c3d188134e7fe09ffb16029202db6b3e6955a9f1f6d591a3e", 
size = 3430 }, + { url = "https://files.pythonhosted.org/packages/b5/78/eb22765325d03008dae55f98c77053231b9344d2bef6304f3d93121f3468/llama_index_embeddings_azure_openai-0.3.0-py3-none-any.whl", hash = "sha256:2ca61d6b75468d1230cfc1151a878d892b237130b8af09b4434f8c0466d44dfe", size = 3425 }, ] [[package]] name = "llama-index-embeddings-openai" -version = "0.2.5" +version = "0.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/85/06/35969946f229212c17133ca5aa446824381e309141f8ae952d0d40bfa8f5/llama_index_embeddings_openai-0.2.5.tar.gz", hash = "sha256:0047dd71d747068645ed728c29312aa91b65bbe4c6142180034c64dfc5c6f6e8", size = 5395 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/02/a2604ef3a167131fdd701888f45f16c8efa6d523d02efe8c4e640238f4ea/llama_index_embeddings_openai-0.3.1.tar.gz", hash = "sha256:1368aad3ce24cbaed23d5ad251343cef1eb7b4a06d6563d6606d59cb347fef20", size = 5492 } wheels = [ - { url = "https://files.pythonhosted.org/packages/a4/4e/2cabf16c4ef7dda74c233d14d017ba57e933c4dea8a9807b90d145177e88/llama_index_embeddings_openai-0.2.5-py3-none-any.whl", hash = "sha256:823c8311e556349ba19dda408a64a314fa3dafe0e5759709c54d33a0269aa6ba", size = 6089 }, + { url = "https://files.pythonhosted.org/packages/bb/45/ca55b91c4ac1b6251d4099fa44121a6c012129822906cadcc27b8cfb33a4/llama_index_embeddings_openai-0.3.1-py3-none-any.whl", hash = "sha256:f15a3d13da9b6b21b8bd51d337197879a453d1605e625a1c6d45e741756c0290", size = 6177 }, ] [[package]] name = "llama-index-indices-managed-llama-cloud" -version = "0.4.0" +version = "0.6.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-cloud" }, { name = "llama-index-core" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cf/82/7597547f339209d2abbf17717bea6208f9b380427bd765aee460403a576d/llama_index_indices_managed_llama_cloud-0.4.0.tar.gz", hash = 
"sha256:fbebff7876a219b6ab96892ae7c432a9299195fab8f67d4a4a0ebf6da210b242", size = 9800 } +sdist = { url = "https://files.pythonhosted.org/packages/ce/58/29afa6086e2080ae27da79949e319f77f08cb7d1b2bd26e56a676dab1338/llama_index_indices_managed_llama_cloud-0.6.3.tar.gz", hash = "sha256:f09e4182cbc2a2bd75ae85cebb1681075247f0d91b931b094cac4315386ce87a", size = 10483 } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/bf/3c1986159e047306ebdfb32555ef667fe8305ef6ab772f0624ada7537440/llama_index_indices_managed_llama_cloud-0.4.0-py3-none-any.whl", hash = "sha256:c2c54821f1bf17a7810e6c013fbe7ddfef4154b7e5b100f7bf8673098f8004e4", size = 10365 }, -] - -[[package]] -name = "llama-index-legacy" -version = "0.9.48.post3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "dataclasses-json" }, - { name = "deprecated" }, - { name = "dirtyjson" }, - { name = "fsspec" }, - { name = "httpx" }, - { name = "nest-asyncio" }, - { name = "networkx" }, - { name = "nltk" }, - { name = "numpy" }, - { name = "openai" }, - { name = "pandas" }, - { name = "requests" }, - { name = "sqlalchemy", extra = ["asyncio"] }, - { name = "tenacity" }, - { name = "tiktoken" }, - { name = "typing-extensions" }, - { name = "typing-inspect" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/35/93/b07dbbee6d1e57dc389d8e66e549dad9205cf569cf7f78fa8ef839598711/llama_index_legacy-0.9.48.post3.tar.gz", hash = "sha256:f6969f1085efb0abebd6367e46f3512020f3f6b9c086f458a519830dd61e8206", size = 781593 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/84/40/f8e42a3c569031f888c46bcdf12b79736d8728c77c4290ed348feaba79ea/llama_index_legacy-0.9.48.post3-py3-none-any.whl", hash = "sha256:04221320d84d96ba9ee3e21e5055bd8527cbd769e8f1c60cf0368ed907e012a2", size = 1200711 }, + { url = 
"https://files.pythonhosted.org/packages/43/c6/ebb53a15e63c8da3633a595f53fc965509e8c6707da6a8b1bfa9b7923236/llama_index_indices_managed_llama_cloud-0.6.3-py3-none-any.whl", hash = "sha256:7f125602f624a2d321b6a4130cd98df35eb8c15818a159390755b2c13068f4ce", size = 11077 }, ] [[package]] name = "llama-index-llms-azure-openai" -version = "0.2.2" +version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "azure-identity" }, @@ -2144,68 +2121,68 @@ dependencies = [ { name = "llama-index-core" }, { name = "llama-index-llms-openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ce/f4/6659a0b4e4cf3c47f6ebfe8e7dcbc035d046cacf8050d0b340d0e116ddf6/llama_index_llms_azure_openai-0.2.2.tar.gz", hash = "sha256:717bc3bf858e800d66e4f2ddec85a2e7dd503006d55981053d08e98771ec3abc", size = 5466 } +sdist = { url = "https://files.pythonhosted.org/packages/81/d7/21264774d0e0819d869ac2f6527fd6b405340647feb4fef7b6b59c520858/llama_index_llms_azure_openai-0.3.0.tar.gz", hash = "sha256:0feea9319d832c8b5e8e0f397c905e45df54c529b6a778825adcd0d254bd7d63", size = 5557 } wheels = [ - { url = "https://files.pythonhosted.org/packages/58/91/44a6d7c546e8b23be76743768b815a36f27770434108a69b1d08f6884abc/llama_index_llms_azure_openai-0.2.2-py3-none-any.whl", hash = "sha256:c8a7d04a111ceff0b4335dc9273fbdb37fdb5095b6234190ca727736f6466d7b", size = 6306 }, + { url = "https://files.pythonhosted.org/packages/90/49/a90c17bddddb411e0bc2d05bcf393fb03474279fb6fbe20c98db68473d98/llama_index_llms_azure_openai-0.3.0-py3-none-any.whl", hash = "sha256:24091aedf7ba24a7b217d17c4358e62b5d6b43a4d3ca44750d442b02a440d26e", size = 6306 }, ] [[package]] name = "llama-index-llms-openai" -version = "0.2.16" +version = "0.3.12" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "openai" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/ba/e7/46f16e0f3ad25f49a050f1421a20b738ec312a5003bd07d749095eedb235/llama_index_llms_openai-0.2.16.tar.gz", hash = "sha256:7c666dd27056c278a079ff45d53f1fbfc8ed363764aa7baeee2e03df47f9072a", size = 13437 } +sdist = { url = "https://files.pythonhosted.org/packages/37/f2/4f78b82d93613310800dafb1ff5adf9901f18838eda7375cb07053463ced/llama_index_llms_openai-0.3.12.tar.gz", hash = "sha256:1880273a7e409c05f1dbccdbac5ce3c214771901cd3696aeb556a29dfed8477a", size = 14298 } wheels = [ - { url = "https://files.pythonhosted.org/packages/3b/49/bae3a019eba473a0b9bf21ad911786f86941e86dd0dac3c3e909352eaf54/llama_index_llms_openai-0.2.16-py3-none-any.whl", hash = "sha256:413466acbb894bd81f8dab2037f595e92392d869eec6d8274a16d43123cac8b6", size = 13623 }, + { url = "https://files.pythonhosted.org/packages/d4/84/4678cfbd2e3f8460823c2f6108a7a93312a8288ce328b262a49f327df133/llama_index_llms_openai-0.3.12-py3-none-any.whl", hash = "sha256:08be76b9e649f6085e93292504074728a6531eb7f8930eaf40a2fce70a9f59df", size = 14541 }, ] [[package]] name = "llama-index-multi-modal-llms-openai" -version = "0.2.3" +version = "0.4.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "llama-index-llms-openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/03/26/298362f1c9531c637b46466847d8aad967aac3b8561c8a0dc859921f6feb/llama_index_multi_modal_llms_openai-0.2.3.tar.gz", hash = "sha256:8eb9b7f1ff3956ef0979e21bc83e6a885e40987b7199f195e46525d06e3ae402", size = 5098 } +sdist = { url = "https://files.pythonhosted.org/packages/eb/32/6f13d3cb79d71504072041d2e83fa67804c7945d2249f7ccadbcbbe15fdc/llama_index_multi_modal_llms_openai-0.4.2.tar.gz", hash = "sha256:3437a08cec85cebbc212aa73da5c9b8b054b4dc628338568435a7df88489476f", size = 5078 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/c6/e2/3e2b639880baf5fd5ca0f88abd68719d2ed7af4d5076698cb5aff612505c/llama_index_multi_modal_llms_openai-0.2.3-py3-none-any.whl", hash = "sha256:96b36beb2c3fca4faca80c59ecf7c6c6629ecdb96c288ef89777b592ec43f872", size = 5886 }, + { url = "https://files.pythonhosted.org/packages/05/18/14772cebd9674772bc605632c92d4675e86d87a3263c35a90865d6c4918b/llama_index_multi_modal_llms_openai-0.4.2-py3-none-any.whl", hash = "sha256:093f60f59fc423abab110810f8f129b96b0212b9737d74480f0e3e1b715e975b", size = 5855 }, ] [[package]] name = "llama-index-program-openai" -version = "0.2.0" +version = "0.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-agent-openai" }, { name = "llama-index-core" }, { name = "llama-index-llms-openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c5/c2/7a136b2f60c281767399149d6bec03f0df7a8a8455ead5da438745236d37/llama_index_program_openai-0.2.0.tar.gz", hash = "sha256:4139935541c011257fbfeb9662b3bf1237b729ef4b1c8f4ddf5b6789d2374ac4", size = 4870 } +sdist = { url = "https://files.pythonhosted.org/packages/7a/b8/24f1103106bfeed04f0e33b587863345c2d7fad001828bb02844a5427fbc/llama_index_program_openai-0.3.1.tar.gz", hash = "sha256:6039a6cdbff62c6388c07e82a157fe2edd3bbef0c5adf292ad8546bf4ec75b82", size = 4818 } wheels = [ - { url = "https://files.pythonhosted.org/packages/1e/67/45422d24aad29191f3a9eb621afa0feb491653f1cb012d51083824d36c7b/llama_index_program_openai-0.2.0-py3-none-any.whl", hash = "sha256:2e10d0c8f21af2e9443eb79e81bb31e7b73835b7c7bbd7ddf20e0a9c846cd368", size = 5295 }, + { url = "https://files.pythonhosted.org/packages/00/59/3f31171c30a08c8ba21155d5241ba174630e57cf43b03d97fd77bf565b51/llama_index_program_openai-0.3.1-py3-none-any.whl", hash = "sha256:93646937395dc5318fd095153d2f91bd632b25215d013d14a87c088887d205f9", size = 5318 }, ] [[package]] name = "llama-index-question-gen-openai" -version = "0.2.0" +version = "0.3.0" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "llama-index-llms-openai" }, { name = "llama-index-program-openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/96/20/fca16fb6914c2b94b79a1a6191698c489a2a11ade056fac3215624c3242b/llama_index_question_gen_openai-0.2.0.tar.gz", hash = "sha256:3dde1cecbd651000639c20031d7ea23334276aabb181cac40ff424f35e10465e", size = 2605 } +sdist = { url = "https://files.pythonhosted.org/packages/4e/47/c57392e2fb00c0f596f912e7977e3c639ac3314f2aed5d4ac733baa367f1/llama_index_question_gen_openai-0.3.0.tar.gz", hash = "sha256:efd3b468232808e9d3474670aaeab00e41b90f75f52d0c9bfbf11207e0963d62", size = 2608 } wheels = [ - { url = "https://files.pythonhosted.org/packages/35/97/64691f3b3a5be2bea75102b189818276c7b2ddf688050e387954b176623a/llama_index_question_gen_openai-0.2.0-py3-none-any.whl", hash = "sha256:a16e68fc5434e9a793f1dfd0cc0354ee19afd167f1d499403b0085b11c5406c0", size = 2902 }, + { url = "https://files.pythonhosted.org/packages/7c/2c/765b0dfc2c988bbea267e236c836d7a96c60a20df76d842e43e17401f800/llama_index_question_gen_openai-0.3.0-py3-none-any.whl", hash = "sha256:9b60ec114273a63b50349948666e5744a8f58acb645824e07c979041e8fec598", size = 2899 }, ] [[package]] name = "llama-index-readers-file" -version = "0.2.2" +version = "0.4.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "beautifulsoup4" }, @@ -2214,27 +2191,27 @@ dependencies = [ { name = "pypdf" }, { name = "striprtf" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/77/3b/e5b9fdef6f773aa0ba42cc2ced42f107412fd32bead6e938acb2702e9a9e/llama_index_readers_file-0.2.2.tar.gz", hash = "sha256:48459f90960b863737147b66ed83afec9ce8984f8eda2561b6d2500214365db2", size = 21936 } +sdist = { url = "https://files.pythonhosted.org/packages/a6/40/787b1cdfac40985c7c6627f0475171bf485cb93de1ff31fa75c724c05e05/llama_index_readers_file-0.4.2.tar.gz", hash = 
"sha256:d677a2eef0695d00b487ac4ea14c82e6a4eaade3a09c540f8f81626d852e3491", size = 22030 } wheels = [ - { url = "https://files.pythonhosted.org/packages/ad/0c/9cb1a0cd5005a222502995f7fe804c3e03dfe1ef7c7e97da2237f4e26fef/llama_index_readers_file-0.2.2-py3-none-any.whl", hash = "sha256:ffec878771c1e7575afb742887561059bcca77b97a81c1c1be310ebb73f10f46", size = 38887 }, + { url = "https://files.pythonhosted.org/packages/a6/dd/6635bd90f6875e6d165efd5caccbe10a976d971732f1826698e28bdec91b/llama_index_readers_file-0.4.2-py3-none-any.whl", hash = "sha256:9341ff375aae3ab58256af4fc7c6619e08b04a1e78bc5c9d3d1763df3b9223a6", size = 38904 }, ] [[package]] name = "llama-index-readers-llama-parse" -version = "0.3.0" +version = "0.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "llama-parse" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/04/33/dba0313ac42ca5082e2931a6d15ebfd2e0ffb34390da199639ef6ff378e3/llama_index_readers_llama_parse-0.3.0.tar.gz", hash = "sha256:a5feada0895714dcc41d65dd512c1c38cf70d8ae19947cff82b80d58e6aa367e", size = 2471 } +sdist = { url = "https://files.pythonhosted.org/packages/35/30/4611821286f82ba7b5842295607baa876262db86f88b87d83595eed172bf/llama_index_readers_llama_parse-0.4.0.tar.gz", hash = "sha256:e99ec56f4f8546d7fda1a7c1ae26162fb9acb7ebcac343b5abdb4234b4644e0f", size = 2472 } wheels = [ - { url = "https://files.pythonhosted.org/packages/49/b2/174bb131b767f9873b9f95b6c216043ccde4cfbeb3bcaf01fa23594f810a/llama_index_readers_llama_parse-0.3.0-py3-none-any.whl", hash = "sha256:1973cc710dbd5e110c7500c9983ecb45787ad1ff92e6b2113f94a57cf48f3038", size = 2474 }, + { url = "https://files.pythonhosted.org/packages/68/4f/e30d4257fe9e4224f5612b77fe99aaceddae411b2e74ca30534491de3e6f/llama_index_readers_llama_parse-0.4.0-py3-none-any.whl", hash = "sha256:574e48386f28d2c86c3f961ca4a4906910312f3400dd0c53014465bfbc6b32bf", size = 2472 }, ] [[package]] name = "llama-index-readers-web" -version 
= "0.2.4" +version = "0.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -2249,47 +2226,48 @@ dependencies = [ { name = "spider-client" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/21/3b/cffc65023c5c19062d64438e7bdfbbdf64663603f45bf21e41d372d18e68/llama_index_readers_web-0.2.4.tar.gz", hash = "sha256:7fce3a98c3b3f7621a69161d92677abc69d535a8dd7a43a2411f8e369b0b741e", size = 53837 } +sdist = { url = "https://files.pythonhosted.org/packages/89/a5/14e4277c871194092e014fd893f8f17a7c84f447a254696d7985ab9e603c/llama_index_readers_web-0.3.3.tar.gz", hash = "sha256:740373b17456cc46a9b39810253a3c1adfc8814d40f88798bea42115a10626ce", size = 53969 } wheels = [ - { url = "https://files.pythonhosted.org/packages/ac/62/fcc840717af6760805739fe3f1c37e070fa321de1e142ecffb1d75784a71/llama_index_readers_web-0.2.4-py3-none-any.whl", hash = "sha256:02b13fa546aad5472bffdfc57fb9d074631b68406ebc908bf0bdec06daf7c90e", size = 76427 }, + { url = "https://files.pythonhosted.org/packages/98/40/6e290cac34ac217b47347c7b0268114aef1f0c836998647d0c9f0fabc8f0/llama_index_readers_web-0.3.3-py3-none-any.whl", hash = "sha256:ab166bb14a56f5b10b637d6633c861d86bcaa72e7e123c4e31d304e6b1d88efe", size = 76616 }, ] [[package]] name = "llama-index-readers-wikipedia" -version = "0.2.0" +version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e2/d3/e16be7ebab3dc1d96b806e5b1064a31ad2bb02711440ad8b492552c5bfb9/llama_index_readers_wikipedia-0.2.0.tar.gz", hash = "sha256:6d8cba820dd4f9400e112cda5aefedcf02c23494651cddefb1cd33955fd0e7a4", size = 2473 } +sdist = { url = "https://files.pythonhosted.org/packages/ae/f1/1bd33ebbd003f1e19e9a77a85d0e77c0dd0c904de50cc9212cc718648813/llama_index_readers_wikipedia-0.3.0.tar.gz", hash = "sha256:77972387cd5410c981bd427699613de63e76889f99816512fc3fce3b2eca440a", size = 2445 } 
wheels = [ - { url = "https://files.pythonhosted.org/packages/a3/a0/c728c5bb0c12b0303ac88d4c20f03be1bde445c949f02f8132f77c2a8c80/llama_index_readers_wikipedia-0.2.0-py3-none-any.whl", hash = "sha256:01efeefde229dd61bd8110fde161329531f8e609c4e82e11dc447067c6ec969d", size = 2705 }, + { url = "https://files.pythonhosted.org/packages/7b/8a/c85a69d9899fd6b7176bcbf6d19579feb1110e340a48b486f3682bc1bf60/llama_index_readers_wikipedia-0.3.0-py3-none-any.whl", hash = "sha256:1723441901a3a19f323872e3c5a968bbfc98cdc5f35e901c99e79f0e8cb7fa57", size = 2702 }, ] [[package]] name = "llama-index-tools-wikipedia" -version = "0.2.0" +version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "wikipedia" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a5/59/9c87dd9fe337c77b8c3c46ff1c10efcd97502ddc834e4828af20e0e0c42d/llama_index_tools_wikipedia-0.2.0.tar.gz", hash = "sha256:73546e72c07ef33551e0bfe131bb1a42969469d6a1dd76e05a23774dbcf1514a", size = 2490 } +sdist = { url = "https://files.pythonhosted.org/packages/86/fc/0ebe0913694a3582c0ae2c96cafb48689a9d012766e5b8a32d59932009de/llama_index_tools_wikipedia-0.3.0.tar.gz", hash = "sha256:8e3fc5ae8a479aacc6640c6c30a66f9848762bf8ebbbc4ceab41e8a4762a664c", size = 2487 } wheels = [ - { url = "https://files.pythonhosted.org/packages/64/c4/81a6174f312ede20b0c97d6c20f70184be1baed5ea365d0c804c25bc844f/llama_index_tools_wikipedia-0.2.0-py3-none-any.whl", hash = "sha256:d2a44ffeedd44fca00fe76fa074acd0c231e3a2d50d7ad81bbc2be7d44de8d75", size = 2717 }, + { url = "https://files.pythonhosted.org/packages/60/89/0d7aa9a41ed0a0768790da770ef057416b81a92ecc35dc9f9d70a86abbb1/llama_index_tools_wikipedia-0.3.0-py3-none-any.whl", hash = "sha256:aa76c39237056b3ed727a23aadc65f34c5b500449ee9ec2efaced055f3ff9938", size = 2712 }, ] [[package]] name = "llama-parse" -version = "0.5.12" +version = "0.5.19" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" 
}, { name = "llama-index-core" }, + { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bb/8b/d784e42f3999a5278dd8a23de35f8fedef559eaa33bc188e42304d5b246b/llama_parse-0.5.12.tar.gz", hash = "sha256:e241606cf3574425df76c0f5d01a31a95c792c6fbef80aaf72f8ed6448bd1715", size = 13584 } +sdist = { url = "https://files.pythonhosted.org/packages/3b/02/63839a55f6f207110400c4f394152fd0290e9f8e450226b02a87cfdbd835/llama_parse-0.5.19.tar.gz", hash = "sha256:db69da70e199a2664705eb983a70fa92b7cee19dd6cff175af7692a0b8a4dd53", size = 16100 } wheels = [ - { url = "https://files.pythonhosted.org/packages/4a/80/ee558246d4a70bb401d768ab60d84001b6c1b7c5914236a4d1d8997fc5e2/llama_parse-0.5.12-py3-none-any.whl", hash = "sha256:6011feb49da5db4bcbeea1cc6688b6ff24b483877fda80b03fe59239cd08b907", size = 13059 }, + { url = "https://files.pythonhosted.org/packages/38/b7/3ff106e8199992bb62e72f195c8f6f2f2fe4a185f5f92746f0ed9db5c5d2/llama_parse-0.5.19-py3-none-any.whl", hash = "sha256:715cc895d183531b4299359d4f4004089b2e522f5f137f316084e7aa04035b62", size = 15421 }, ] [[package]] @@ -2515,14 +2493,14 @@ wheels = [ [[package]] name = "marshmallow" -version = "3.23.0" +version = "3.24.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "packaging" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b7/41/05580fed5798ba8032341e7e330b866adc88dfca3bc3ec86c04e4ffdc427/marshmallow-3.23.0.tar.gz", hash = "sha256:98d8827a9f10c03d44ead298d2e99c6aea8197df18ccfad360dae7f89a50da2e", size = 177439 } +sdist = { url = "https://files.pythonhosted.org/packages/94/75/39ed76821caae7ff84cb0e1a5b18f6734a11b7defe4db426330bdcc5abe7/marshmallow-3.24.0.tar.gz", hash = "sha256:378572f727e52123d00de1bdd9b7ea7bed18bbfedc7f9bfbcddaf78925a8d602", size = 176103 } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/9e/f8f0308b66ff5fcc3b351ffa5fcba19ae725dfeda75d3c673f4427f3fc99/marshmallow-3.23.0-py3-none-any.whl", hash = 
"sha256:82f20a2397834fe6d9611b241f2f7e7b680ed89c49f84728a1ad937be6b4bdf4", size = 49490 }, + { url = "https://files.pythonhosted.org/packages/87/f0/7ccbfe13a24b9129a27b6d676d58ac801155e91e2f248b5f3818b85030e9/marshmallow-3.24.0-py3-none-any.whl", hash = "sha256:459922b7a1fd3d29d5082ddcadfcea0efd98985030e71d3ef0dd8f44f406e41d", size = 49317 }, ] [[package]] @@ -2792,7 +2770,7 @@ wheels = [ [[package]] name = "nbclient" -version = "0.10.0" +version = "0.10.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jupyter-client" }, @@ -2800,9 +2778,9 @@ dependencies = [ { name = "nbformat" }, { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e2/d2/39bc36604f24bccd44d374ac34769bc58c53a1da5acd1e83f0165aa4940e/nbclient-0.10.0.tar.gz", hash = "sha256:4b3f1b7dba531e498449c4db4f53da339c91d449dc11e9af3a43b4eb5c5abb09", size = 62246 } +sdist = { url = "https://files.pythonhosted.org/packages/87/66/7ffd18d58eae90d5721f9f39212327695b749e23ad44b3881744eaf4d9e8/nbclient-0.10.2.tar.gz", hash = "sha256:90b7fc6b810630db87a6d0c2250b1f0ab4cf4d3c27a299b0cde78a4ed3fd9193", size = 62424 } wheels = [ - { url = "https://files.pythonhosted.org/packages/66/e8/00517a23d3eeaed0513e718fbc94aab26eaa1758f5690fc8578839791c79/nbclient-0.10.0-py3-none-any.whl", hash = "sha256:f13e3529332a1f1f81d82a53210322476a168bb7090a0289c795fe9cc11c9d3f", size = 25318 }, + { url = "https://files.pythonhosted.org/packages/34/6d/e7fa07f03a4a7b221d94b4d586edb754a9b0dc3c9e2c93353e9fa4e0d117/nbclient-0.10.2-py3-none-any.whl", hash = "sha256:4ffee11e788b4a27fabeb7955547e4318a5298f34342a4bfd01f2e1faaeadc3d", size = 25434 }, ] [[package]] @@ -2822,7 +2800,7 @@ wheels = [ [[package]] name = "nbqa" -version = "1.9.0" +version = "1.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "autopep8" }, @@ -2830,9 +2808,9 @@ dependencies = [ { name = "tokenize-rt" }, { name = "tomli" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/53/58/11a261eaf1326f39ac7deadc462ded37af29c279a657a54225c5afba4f0b/nbqa-1.9.0.tar.gz", hash = "sha256:48c0acd83675bd6d086efba3885e1137d9475c0da328c74f46b9acbc664fe36f", size = 38257 } +sdist = { url = "https://files.pythonhosted.org/packages/aa/76/62d2609924cf34445148cd6b5de694cf64c179cc416cac93182579620e57/nbqa-1.9.1.tar.gz", hash = "sha256:a1f4bcf587c597302fed295951001fc4e1be4ce0e77e1ab1b25ac2fbe3db0cdd", size = 38348 } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/b2/2f9bca9d17466258caec88c8eb15069924c0d1db66633869a1ef83861943/nbqa-1.9.0-py3-none-any.whl", hash = "sha256:89da6b55ce0b47b5e8be45c5b94404e1d861e45df36959dd34a5d15cf45141ea", size = 35211 }, + { url = "https://files.pythonhosted.org/packages/28/88/4789719fbbe166d12d345b3ac66b96105f10001b16e00a9765ba29261a21/nbqa-1.9.1-py3-none-any.whl", hash = "sha256:95552d2f6c2c038136252a805aa78d85018aef922586270c3a074332737282e5", size = 35259 }, ] [[package]] @@ -3082,7 +3060,7 @@ wheels = [ [[package]] name = "openai" -version = "1.52.2" +version = "1.59.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -3094,9 +3072,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a5/78/1c4658043cdbb7faf7f388cbb3902d5f8b9a307e10f2021b1a8a4b0b8b15/openai-1.52.2.tar.gz", hash = "sha256:87b7d0f69d85f5641678d414b7ee3082363647a5c66a462ed7f3ccb59582da0d", size = 310119 } +sdist = { url = "https://files.pythonhosted.org/packages/73/d0/def3c7620e1cb446947f098aeac9d88fc826b1760d66da279e4712d37666/openai-1.59.3.tar.gz", hash = "sha256:7f7fff9d8729968588edf1524e73266e8593bb6cab09298340efb755755bb66f", size = 344192 } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/4c/906b5b32c4c01402ac3b4c3fc28f601443ac5c6f13c84a95dd178c8d545d/openai-1.52.2-py3-none-any.whl", hash = "sha256:57e9e37bc407f39bb6ec3a27d7e8fb9728b2779936daa1fcf95df17d3edfaccc", 
size = 386947 }, + { url = "https://files.pythonhosted.org/packages/c7/26/0e0fb582bcb2a7cb6802447a749a2fc938fe4b82324097abccb86abfd5d1/openai-1.59.3-py3-none-any.whl", hash = "sha256:b041887a0d8f3e70d1fc6ffbb2bf7661c3b9a2f3e806c04bf42f572b9ac7bc37", size = 454793 }, ] [[package]] @@ -3388,11 +3366,11 @@ wheels = [ [[package]] name = "pip" -version = "24.2" +version = "24.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4d/87/fb90046e096a03aeab235e139436b3fe804cdd447ed2093b0d70eba3f7f8/pip-24.2.tar.gz", hash = "sha256:5b5e490b5e9cb275c879595064adce9ebd31b854e3e803740b72f9ccf34a45b8", size = 1922041 } +sdist = { url = "https://files.pythonhosted.org/packages/f4/b1/b422acd212ad7eedddaf7981eee6e5de085154ff726459cf2da7c5a184c1/pip-24.3.1.tar.gz", hash = "sha256:ebcb60557f2aefabc2e0f918751cd24ea0d56d8ec5445fe1807f1d2109660b99", size = 1931073 } wheels = [ - { url = "https://files.pythonhosted.org/packages/d4/55/90db48d85f7689ec6f81c0db0622d704306c5284850383c090e6c7195a5c/pip-24.2-py3-none-any.whl", hash = "sha256:2cd581cf58ab7fcfca4ce8efa6dcacd0de5bf8d0a3eb9ec927e07405f4d9e2a2", size = 1815170 }, + { url = "https://files.pythonhosted.org/packages/ef/7d/500c9ad20238fcfcb4cb9243eede163594d7020ce87bd9610c9e02771876/pip-24.3.1-py3-none-any.whl", hash = "sha256:3790624780082365f47549d032f3770eeb2b1e8bd1f7b2e02dace1afa361b4ed", size = 1822182 }, ] [[package]] @@ -3433,29 +3411,30 @@ wheels = [ [[package]] name = "poethepoet" -version = "0.29.0" +version = "0.32.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pastel" }, { name = "pyyaml" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/dc/7a/7144e47128022146502e179e259b312335bc1465384025eee92d4c7e16b2/poethepoet-0.29.0.tar.gz", hash = "sha256:676842302f2304a86b31ac56398dd672fae8471128d2086896393384dbafc095", size = 58619 } +sdist = { url = 
"https://files.pythonhosted.org/packages/8a/f6/1692a42cf426494d89dbc693ba55ebd653bd2e84bbb6b3da4127b87956df/poethepoet-0.32.0.tar.gz", hash = "sha256:a700be02e932e1a8907ae630928fc769ea9a77986189ba6867e6e3fd8f60e5b7", size = 62962 } wheels = [ - { url = "https://files.pythonhosted.org/packages/b5/7d/871be58cc970ab4c9a541d64b77b7454d150a409c3e48b53fc9eac7a8967/poethepoet-0.29.0-py3-none-any.whl", hash = "sha256:f8dfe55006dcfb5cf31bcb1904e1262e1c642a4502fee3688cbf1bddfe5c7601", size = 76069 }, + { url = "https://files.pythonhosted.org/packages/27/12/2994011e33d37772228439fe215fc022ff180b161ab7bd8ea5ac92717556/poethepoet-0.32.0-py3-none-any.whl", hash = "sha256:fba84c72d923feac228d1ea7734c5a54701f2e71fad42845f027c0fbf998a073", size = 81717 }, ] [[package]] name = "polars" -version = "1.11.0" +version = "1.19.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/42/88/4b06b7636f80575b9286781d12e263514a21108ba00e0f8b209478fa2a04/polars-1.11.0.tar.gz", hash = "sha256:4fbdd772b5f4538eb9f5ae4f3256290dba1f6c6b9d5226aed918801ed51089f4", size = 4076185 } +sdist = { url = "https://files.pythonhosted.org/packages/26/d9/66ada2204483c4c4d83898ade77eacd5fbef26ae4975a0d7d5de134ca46a/polars-1.19.0.tar.gz", hash = "sha256:b52ada5c43fcdadf64f282522198c5549ee4e46ea57d236a4d7e572643070d9d", size = 4267947 } wheels = [ - { url = "https://files.pythonhosted.org/packages/74/fa/185cf232322e6e1b0b07ef92914853f60b067b16bfae5e9f4ebfc752a3d2/polars-1.11.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:d20152fc29b83ffa4ca7d92b056866b1755dda346a3841106d9b361ccc96d94b", size = 32847858 }, - { url = "https://files.pythonhosted.org/packages/9c/dc/fda904586956236da0e26da51ed4f09487aa42f51634b8df6477f08ee7d5/polars-1.11.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:fd48e8f607ae42f49abf4491e67fb1ad7d85157cb0a45a164fc4d1760d67e8ef", size = 28813631 }, - { url = 
"https://files.pythonhosted.org/packages/94/25/7eaafa7320e5bdb88f7f793a08ab0a877309eef1a4537351e362cbd1dcba/polars-1.11.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1293f826e5469626d2a4da5e66afb0b46c6f8cb43d16e301d99aa5b911518c34", size = 34046798 }, - { url = "https://files.pythonhosted.org/packages/47/03/374d9c4e6176ba4af5aa95ff002f3b5e41aff86da6037332b5107b74b5df/polars-1.11.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:0c41c79fc7e2159a0d8fb69a3d0d26c402846d10fe6ff772b2591766e39dfac4", size = 30410176 }, - { url = "https://files.pythonhosted.org/packages/28/6b/0420d9a29e303b43be581ec70025329d84bd536ccef1a7907c81b8e352f6/polars-1.11.0-cp39-abi3-win_amd64.whl", hash = "sha256:a361d50ab5b0a6387bfe07a8a755bad7e61ba3d03381e4d1e343f49f6f0eb893", size = 33713325 }, + { url = "https://files.pythonhosted.org/packages/c0/7d/e8645281281d44d96752443366ceef2df76c9c1e17dce040111abb6a4a12/polars-1.19.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:51c01837268a1aa41785e60ed7d3363d4b52f652ab0eef4981f887bdfa2e9ca7", size = 29472039 }, + { url = "https://files.pythonhosted.org/packages/d7/fb/7e5054598d6bb7a47e4ca086797bae61270f7d570350cf779dd97384d913/polars-1.19.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:20f8235e810f6ee795d7a215a3560945e6a1b57d017f87ba0c8542dced1fc665", size = 26150541 }, + { url = "https://files.pythonhosted.org/packages/ba/ba/6d715730c28b035abd308fc2cf0fcbae0cedea6216797e83ce4a9a96c6d4/polars-1.19.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0ea51f7b3553652bf0d53f3b925e969a898d4feb9980acecf8e3037d696903", size = 32751173 }, + { url = "https://files.pythonhosted.org/packages/ea/9a/bee8ab37ab82b8eea75170afa3b37ea7e1df74e4c4da8f6c93b3009977fd/polars-1.19.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:30305ef4e1b634c67a5d985832296fade9908482c5b1abb0100800808b2d090e", size = 29704437 }, + { url = 
"https://files.pythonhosted.org/packages/57/ec/74afa5699e37e03e3acc7f241f4e2c3e8c91847524005424d9cf038b3034/polars-1.19.0-cp39-abi3-win_amd64.whl", hash = "sha256:de4aa45e24f8f94a1da9cc6031a7db6fa65ac7de8246fac0bc581ebb427d0643", size = 32846039 }, + { url = "https://files.pythonhosted.org/packages/cf/5b/c6f6c70ddc9d3070dee65f4640437cb84ccb4cca04f7a81b01db15329ae3/polars-1.19.0-cp39-abi3-win_arm64.whl", hash = "sha256:d7ca7aeb63fa22c0a00f6cfa95dd5252c249e83dd4d1b954583a59f97a8e407b", size = 29029208 }, ] [[package]] @@ -3555,17 +3534,17 @@ wheels = [ [[package]] name = "psutil" -version = "6.1.0" +version = "6.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/26/10/2a30b13c61e7cf937f4adf90710776b7918ed0a9c434e2c38224732af310/psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a", size = 508565 } +sdist = { url = "https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } wheels = [ - { url = "https://files.pythonhosted.org/packages/01/9e/8be43078a171381953cfee33c07c0d628594b5dbfc5157847b85022c2c1b/psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688", size = 247762 }, - { url = "https://files.pythonhosted.org/packages/1d/cb/313e80644ea407f04f6602a9e23096540d9dc1878755f3952ea8d3d104be/psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e", size = 248777 }, - { url = "https://files.pythonhosted.org/packages/65/8e/bcbe2025c587b5d703369b6a75b65d41d1367553da6e3f788aff91eaf5bd/psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38", size = 284259 }, - { url = "https://files.pythonhosted.org/packages/58/4d/8245e6f76a93c98aab285a43ea71ff1b171bcd90c9d238bf81f7021fb233/psutil-6.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b", size = 287255 }, - { url = "https://files.pythonhosted.org/packages/27/c2/d034856ac47e3b3cdfa9720d0e113902e615f4190d5d1bdb8df4b2015fb2/psutil-6.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a", size = 288804 }, - { url = "https://files.pythonhosted.org/packages/ea/55/5389ed243c878725feffc0d6a3bc5ef6764312b6fc7c081faaa2cfa7ef37/psutil-6.1.0-cp37-abi3-win32.whl", hash = "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e", size = 250386 }, - { url = "https://files.pythonhosted.org/packages/11/91/87fa6f060e649b1e1a7b19a4f5869709fbf750b7c8c262ee776ec32f3028/psutil-6.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be", size = 254228 }, + { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, + { url = "https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, + { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, + { url = "https://files.pythonhosted.org/packages/9c/39/0f88a830a1c8a3aba27fededc642da37613c57cbff143412e3536f89784f/psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160", size = 287477 }, + { url = "https://files.pythonhosted.org/packages/47/da/99f4345d4ddf2845cb5b5bd0d93d554e84542d116934fde07a0c50bd4e9f/psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3", size = 289017 }, + { url = "https://files.pythonhosted.org/packages/38/53/bd755c2896f4461fd4f36fa6a6dcb66a88a9e4b9fd4e5b66a77cf9d4a584/psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53", size = 250602 }, + { url = "https://files.pythonhosted.org/packages/7b/d7/7831438e6c3ebbfa6e01a927127a6cb42ad3ab844247f3c5b96bea25d73d/psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649", size = 254444 }, ] [[package]] @@ -3779,14 +3758,14 @@ crypto = [ [[package]] name = "pypdf" -version = "4.3.1" +version = "5.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f0/65/2ed7c9e1d31d860f096061b3dd2d665f501e09faaa0409a3f0d719d2a16d/pypdf-4.3.1.tar.gz", hash = "sha256:b2f37fe9a3030aa97ca86067a56ba3f9d3565f9a791b305c7355d8392c30d91b", size = 293266 } +sdist = { url = "https://files.pythonhosted.org/packages/6b/9a/72d74f05f64895ebf1c7f6646cf7fe6dd124398c5c49240093f92d6f0fdd/pypdf-5.1.0.tar.gz", hash = "sha256:425a129abb1614183fd1aca6982f650b47f8026867c0ce7c4b9f281c443d2740", size = 5011381 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/3c/60/eccdd92dd4af3e4bea6d6a342f7588c618a15b9bec4b968af581e498bcc4/pypdf-4.3.1-py3-none-any.whl", hash = "sha256:64b31da97eda0771ef22edb1bfecd5deee4b72c3d1736b7df2689805076d6418", size = 295825 }, + { url = "https://files.pythonhosted.org/packages/04/fc/6f52588ac1cb4400a7804ef88d0d4e00cfe57a7ac6793ec3b00de5a8758b/pypdf-5.1.0-py3-none-any.whl", hash = "sha256:3bd4f503f4ebc58bae40d81e81a9176c400cbbac2ba2d877367595fb524dfdfc", size = 297976 }, ] [[package]] @@ -3830,14 +3809,14 @@ wheels = [ [[package]] name = "pytest-asyncio" -version = "0.24.0" +version = "0.25.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pytest" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/52/6d/c6cf50ce320cf8611df7a1254d86233b3df7cc07f9b5f5cbcb82e08aa534/pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276", size = 49855 } +sdist = { url = "https://files.pythonhosted.org/packages/4b/04/0477a4bdd176ad678d148c075f43620b3f7a060ff61c7da48500b1fa8a75/pytest_asyncio-0.25.1.tar.gz", hash = "sha256:79be8a72384b0c917677e00daa711e07db15259f4d23203c59012bcd989d4aee", size = 53760 } wheels = [ - { url = "https://files.pythonhosted.org/packages/96/31/6607dab48616902f76885dfcf62c08d929796fc3b2d2318faf9fd54dbed9/pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b", size = 18024 }, + { url = "https://files.pythonhosted.org/packages/81/fb/efc7226b384befd98d0e00d8c4390ad57f33c8fde00094b85c5e07897def/pytest_asyncio-0.25.1-py3-none-any.whl", hash = "sha256:c84878849ec63ff2ca509423616e071ef9cd8cc93c053aa33b5b8fb70a990671", size = 19357 }, ] [[package]] @@ -4160,61 +4139,61 @@ wheels = [ [[package]] name = "rpds-py" -version = "0.20.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/55/64/b693f262791b818880d17268f3f8181ef799b0d187f6f731b1772e05a29a/rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121", size = 25814 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/71/2d/a7e60483b72b91909e18f29a5c5ae847bac4e2ae95b77bb77e1f41819a58/rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2", size = 318432 }, - { url = "https://files.pythonhosted.org/packages/b5/b4/f15b0c55a6d880ce74170e7e28c3ed6c5acdbbd118df50b91d1dabf86008/rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f", size = 311333 }, - { url = "https://files.pythonhosted.org/packages/36/10/3f4e490fe6eb069c07c22357d0b4804cd94cb9f8d01345ef9b1d93482b9d/rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150", size = 366697 }, - { url = "https://files.pythonhosted.org/packages/f5/c8/cd6ab31b4424c7fab3b17e153b6ea7d1bb0d7cabea5c1ef683cc8adb8bc2/rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e", size = 368386 }, - { url = "https://files.pythonhosted.org/packages/60/5e/642a44fda6dda90b5237af7a0ef1d088159c30a504852b94b0396eb62125/rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2", size = 395374 }, - { url = "https://files.pythonhosted.org/packages/7c/b5/ff18c093c9e72630f6d6242e5ccb0728ef8265ba0a154b5972f89d23790a/rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3", size = 433189 }, - { url = 
"https://files.pythonhosted.org/packages/4a/6d/1166a157b227f2333f8e8ae320b6b7ea2a6a38fbe7a3563ad76dffc8608d/rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf", size = 354849 }, - { url = "https://files.pythonhosted.org/packages/70/a4/70ea49863ea09ae4c2971f2eef58e80b757e3c0f2f618c5815bb751f7847/rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140", size = 373233 }, - { url = "https://files.pythonhosted.org/packages/3b/d3/822a28152a1e7e2ba0dc5d06cf8736f4cd64b191bb6ec47fb51d1c3c5ccf/rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f", size = 541852 }, - { url = "https://files.pythonhosted.org/packages/c6/a5/6ef91e4425dc8b3445ff77d292fc4c5e37046462434a0423c4e0a596a8bd/rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce", size = 547630 }, - { url = "https://files.pythonhosted.org/packages/72/f8/d5625ee05c4e5c478954a16d9359069c66fe8ac8cd5ddf28f80d3b321837/rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94", size = 525766 }, - { url = "https://files.pythonhosted.org/packages/94/3c/1ff1ed6ae323b3e16fdfcdae0f0a67f373a6c3d991229dc32b499edeffb7/rpds_py-0.20.0-cp310-none-win32.whl", hash = "sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee", size = 199174 }, - { url = "https://files.pythonhosted.org/packages/ec/ba/5762c0aee2403dfea14ed74b0f8a2415cfdbb21cf745d600d9a8ac952c5b/rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399", size = 213543 }, - { url = 
"https://files.pythonhosted.org/packages/ab/2a/191374c52d7be0b056cc2a04d718d2244c152f915d4a8d2db2aacc526189/rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489", size = 318369 }, - { url = "https://files.pythonhosted.org/packages/0e/6a/2c9fdcc6d235ac0d61ec4fd9981184689c3e682abd05e3caa49bccb9c298/rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318", size = 311303 }, - { url = "https://files.pythonhosted.org/packages/d2/b2/725487d29633f64ef8f9cbf4729111a0b61702c8f8e94db1653930f52cce/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db", size = 366424 }, - { url = "https://files.pythonhosted.org/packages/7a/8c/668195ab9226d01b7cf7cd9e59c1c0be1df05d602df7ec0cf46f857dcf59/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5", size = 368359 }, - { url = "https://files.pythonhosted.org/packages/52/28/356f6a39c1adeb02cf3e5dd526f5e8e54e17899bef045397abcfbf50dffa/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5", size = 394886 }, - { url = "https://files.pythonhosted.org/packages/a2/65/640fb1a89080a8fb6f4bebd3dafb65a2edba82e2e44c33e6eb0f3e7956f1/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6", size = 432416 }, - { url = "https://files.pythonhosted.org/packages/a7/e8/85835077b782555d6b3416874b702ea6ebd7db1f145283c9252968670dd5/rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209", size = 354819 }, - { 
url = "https://files.pythonhosted.org/packages/4f/87/1ac631e923d65cbf36fbcfc6eaa702a169496de1311e54be142f178e53ee/rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3", size = 373282 }, - { url = "https://files.pythonhosted.org/packages/e4/ce/cb316f7970189e217b998191c7cf0da2ede3d5437932c86a7210dc1e9994/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272", size = 541540 }, - { url = "https://files.pythonhosted.org/packages/90/d7/4112d7655ec8aff168ecc91d4ceb51c557336edde7e6ccf6463691a2f253/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad", size = 547640 }, - { url = "https://files.pythonhosted.org/packages/ab/44/4f61d64dfed98cc71623f3a7fcb612df636a208b4b2c6611eaa985e130a9/rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58", size = 525555 }, - { url = "https://files.pythonhosted.org/packages/35/f2/a862d81eacb21f340d584cd1c749c289979f9a60e9229f78bffc0418a199/rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0", size = 199338 }, - { url = "https://files.pythonhosted.org/packages/cc/ec/77d0674f9af4872919f3738018558dd9d37ad3f7ad792d062eadd4af7cba/rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c", size = 213585 }, - { url = "https://files.pythonhosted.org/packages/89/b7/f9682c5cc37fcc035f4a0fc33c1fe92ec9cbfdee0cdfd071cf948f53e0df/rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6", size = 321468 }, - { url = 
"https://files.pythonhosted.org/packages/b8/ad/fc82be4eaceb8d444cb6fc1956ce972b3a0795104279de05e0e4131d0a47/rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b", size = 313062 }, - { url = "https://files.pythonhosted.org/packages/0e/1c/6039e80b13a08569a304dc13476dc986352dca4598e909384db043b4e2bb/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739", size = 370168 }, - { url = "https://files.pythonhosted.org/packages/dc/c9/5b9aa35acfb58946b4b785bc8e700ac313669e02fb100f3efa6176a83e81/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c", size = 371376 }, - { url = "https://files.pythonhosted.org/packages/7b/dd/0e0dbeb70d8a5357d2814764d467ded98d81d90d3570de4fb05ec7224f6b/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee", size = 397200 }, - { url = "https://files.pythonhosted.org/packages/e4/da/a47d931eb688ccfd77a7389e45935c79c41e8098d984d87335004baccb1d/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96", size = 426824 }, - { url = "https://files.pythonhosted.org/packages/0f/f7/a59a673594e6c2ff2dbc44b00fd4ecdec2fc399bb6a7bd82d612699a0121/rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4", size = 357967 }, - { url = "https://files.pythonhosted.org/packages/5f/61/3ba1905396b2cb7088f9503a460b87da33452da54d478cb9241f6ad16d00/rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef", size = 
378905 }, - { url = "https://files.pythonhosted.org/packages/08/31/6d0df9356b4edb0a3a077f1ef714e25ad21f9f5382fc490c2383691885ea/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821", size = 546348 }, - { url = "https://files.pythonhosted.org/packages/ae/15/d33c021de5cb793101df9961c3c746dfc476953dbbf5db337d8010dffd4e/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940", size = 553152 }, - { url = "https://files.pythonhosted.org/packages/70/2d/5536d28c507a4679179ab15aa0049440e4d3dd6752050fa0843ed11e9354/rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174", size = 528807 }, - { url = "https://files.pythonhosted.org/packages/e3/62/7ebe6ec0d3dd6130921f8cffb7e34afb7f71b3819aa0446a24c5e81245ec/rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139", size = 200993 }, - { url = "https://files.pythonhosted.org/packages/ec/2f/b938864d66b86a6e4acadefdc56de75ef56f7cafdfd568a6464605457bd5/rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585", size = 214458 }, - { url = "https://files.pythonhosted.org/packages/06/39/bf1f664c347c946ef56cecaa896e3693d91acc741afa78ebb3fdb7aba08b/rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045", size = 319444 }, - { url = "https://files.pythonhosted.org/packages/c1/71/876135d3cb90d62468540b84e8e83ff4dc92052ab309bfdea7ea0b9221ad/rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc", size = 311699 }, - { url = 
"https://files.pythonhosted.org/packages/f7/da/8ccaeba6a3dda7467aebaf893de9eafd56275e2c90773c83bf15fb0b8374/rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02", size = 367825 }, - { url = "https://files.pythonhosted.org/packages/04/b6/02a54c47c178d180395b3c9a8bfb3b93906e08f9acf7b4a1067d27c3fae0/rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92", size = 369046 }, - { url = "https://files.pythonhosted.org/packages/a7/64/df4966743aa4def8727dc13d06527c8b13eb7412c1429def2d4701bee520/rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d", size = 395896 }, - { url = "https://files.pythonhosted.org/packages/6f/d9/7ff03ff3642c600f27ff94512bb158a8d815fea5ed4162c75a7e850d6003/rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855", size = 432427 }, - { url = "https://files.pythonhosted.org/packages/b8/c6/e1b886f7277b3454e55e85332e165091c19114eecb5377b88d892fd36ccf/rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511", size = 355403 }, - { url = "https://files.pythonhosted.org/packages/e2/62/e26bd5b944e547c7bfd0b6ca7e306bfa430f8bd298ab72a1217976a7ca8d/rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51", size = 374491 }, - { url = "https://files.pythonhosted.org/packages/c3/92/93c5a530898d3a5d1ce087455071ba714b77806ed9ffee4070d0c7a53b7e/rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075", size = 543622 }, - { url = "https://files.pythonhosted.org/packages/01/9e/d68fba289625b5d3c9d1925825d7da716fbf812bda2133ac409021d5db13/rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60", size = 548558 }, - { url = "https://files.pythonhosted.org/packages/bf/d6/4b2fad4898154365f0f2bd72ffd190349274a4c1d6a6f94f02a83bb2b8f1/rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344", size = 525753 }, - { url = "https://files.pythonhosted.org/packages/d2/ea/6f121d1802f3adae1981aea4209ea66f9d3c7f2f6d6b85ef4f13a61d17ef/rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989", size = 213529 }, +version = "0.22.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/80/cce854d0921ff2f0a9fa831ba3ad3c65cee3a46711addf39a2af52df2cfd/rpds_py-0.22.3.tar.gz", hash = "sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d", size = 26771 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/2a/ead1d09e57449b99dcc190d8d2323e3a167421d8f8fdf0f217c6f6befe47/rpds_py-0.22.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967", size = 359514 }, + { url = "https://files.pythonhosted.org/packages/8f/7e/1254f406b7793b586c68e217a6a24ec79040f85e030fff7e9049069284f4/rpds_py-0.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37", size = 349031 }, + { url = "https://files.pythonhosted.org/packages/aa/da/17c6a2c73730d426df53675ff9cc6653ac7a60b6438d03c18e1c822a576a/rpds_py-0.22.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:70eb60b3ae9245ddea20f8a4190bd79c705a22f8028aaf8bbdebe4716c3fab24", size = 381485 }, + { url = "https://files.pythonhosted.org/packages/aa/13/2dbacd820466aa2a3c4b747afb18d71209523d353cf865bf8f4796c969ea/rpds_py-0.22.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4041711832360a9b75cfb11b25a6a97c8fb49c07b8bd43d0d02b45d0b499a4ff", size = 386794 }, + { url = "https://files.pythonhosted.org/packages/6d/62/96905d0a35ad4e4bc3c098b2f34b2e7266e211d08635baa690643d2227be/rpds_py-0.22.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64607d4cbf1b7e3c3c8a14948b99345eda0e161b852e122c6bb71aab6d1d798c", size = 423523 }, + { url = "https://files.pythonhosted.org/packages/eb/1b/d12770f2b6a9fc2c3ec0d810d7d440f6d465ccd8b7f16ae5385952c28b89/rpds_py-0.22.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e69b0a0e2537f26d73b4e43ad7bc8c8efb39621639b4434b76a3de50c6966e", size = 446695 }, + { url = "https://files.pythonhosted.org/packages/4d/cf/96f1fd75512a017f8e07408b6d5dbeb492d9ed46bfe0555544294f3681b3/rpds_py-0.22.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc27863442d388870c1809a87507727b799c8460573cfbb6dc0eeaef5a11b5ec", size = 381959 }, + { url = "https://files.pythonhosted.org/packages/ab/f0/d1c5b501c8aea85aeb938b555bfdf7612110a2f8cdc21ae0482c93dd0c24/rpds_py-0.22.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e79dd39f1e8c3504be0607e5fc6e86bb60fe3584bec8b782578c3b0fde8d932c", size = 410420 }, + { url = "https://files.pythonhosted.org/packages/33/3b/45b6c58fb6aad5a569ae40fb890fc494c6b02203505a5008ee6dc68e65f7/rpds_py-0.22.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e0fa2d4ec53dc51cf7d3bb22e0aa0143966119f42a0c3e4998293a3dd2856b09", size = 557620 }, + { url = "https://files.pythonhosted.org/packages/83/62/3fdd2d3d47bf0bb9b931c4c73036b4ab3ec77b25e016ae26fab0f02be2af/rpds_py-0.22.3-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:fda7cb070f442bf80b642cd56483b5548e43d366fe3f39b98e67cce780cded00", size = 584202 }, + { url = "https://files.pythonhosted.org/packages/04/f2/5dced98b64874b84ca824292f9cee2e3f30f3bcf231d15a903126684f74d/rpds_py-0.22.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cff63a0272fcd259dcc3be1657b07c929c466b067ceb1c20060e8d10af56f5bf", size = 552787 }, + { url = "https://files.pythonhosted.org/packages/67/13/2273dea1204eda0aea0ef55145da96a9aa28b3f88bb5c70e994f69eda7c3/rpds_py-0.22.3-cp310-cp310-win32.whl", hash = "sha256:9bd7228827ec7bb817089e2eb301d907c0d9827a9e558f22f762bb690b131652", size = 220088 }, + { url = "https://files.pythonhosted.org/packages/4e/80/8c8176b67ad7f4a894967a7a4014ba039626d96f1d4874d53e409b58d69f/rpds_py-0.22.3-cp310-cp310-win_amd64.whl", hash = "sha256:9beeb01d8c190d7581a4d59522cd3d4b6887040dcfc744af99aa59fef3e041a8", size = 231737 }, + { url = "https://files.pythonhosted.org/packages/15/ad/8d1ddf78f2805a71253fcd388017e7b4a0615c22c762b6d35301fef20106/rpds_py-0.22.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d20cfb4e099748ea39e6f7b16c91ab057989712d31761d3300d43134e26e165f", size = 359773 }, + { url = "https://files.pythonhosted.org/packages/c8/75/68c15732293a8485d79fe4ebe9045525502a067865fa4278f178851b2d87/rpds_py-0.22.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:68049202f67380ff9aa52f12e92b1c30115f32e6895cd7198fa2a7961621fc5a", size = 349214 }, + { url = "https://files.pythonhosted.org/packages/3c/4c/7ce50f3070083c2e1b2bbd0fb7046f3da55f510d19e283222f8f33d7d5f4/rpds_py-0.22.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb4f868f712b2dd4bcc538b0a0c1f63a2b1d584c925e69a224d759e7070a12d5", size = 380477 }, + { url = "https://files.pythonhosted.org/packages/9a/e9/835196a69cb229d5c31c13b8ae603bd2da9a6695f35fe4270d398e1db44c/rpds_py-0.22.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc51abd01f08117283c5ebf64844a35144a0843ff7b2983e0648e4d3d9f10dbb", size = 
386171 }, + { url = "https://files.pythonhosted.org/packages/f9/8e/33fc4eba6683db71e91e6d594a2cf3a8fbceb5316629f0477f7ece5e3f75/rpds_py-0.22.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f3cec041684de9a4684b1572fe28c7267410e02450f4561700ca5a3bc6695a2", size = 422676 }, + { url = "https://files.pythonhosted.org/packages/37/47/2e82d58f8046a98bb9497a8319604c92b827b94d558df30877c4b3c6ccb3/rpds_py-0.22.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ef9d9da710be50ff6809fed8f1963fecdfecc8b86656cadfca3bc24289414b0", size = 446152 }, + { url = "https://files.pythonhosted.org/packages/e1/78/79c128c3e71abbc8e9739ac27af11dc0f91840a86fce67ff83c65d1ba195/rpds_py-0.22.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59f4a79c19232a5774aee369a0c296712ad0e77f24e62cad53160312b1c1eaa1", size = 381300 }, + { url = "https://files.pythonhosted.org/packages/c9/5b/2e193be0e8b228c1207f31fa3ea79de64dadb4f6a4833111af8145a6bc33/rpds_py-0.22.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a60bce91f81ddaac922a40bbb571a12c1070cb20ebd6d49c48e0b101d87300d", size = 409636 }, + { url = "https://files.pythonhosted.org/packages/c2/3f/687c7100b762d62186a1c1100ffdf99825f6fa5ea94556844bbbd2d0f3a9/rpds_py-0.22.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e89391e6d60251560f0a8f4bd32137b077a80d9b7dbe6d5cab1cd80d2746f648", size = 556708 }, + { url = "https://files.pythonhosted.org/packages/8c/a2/c00cbc4b857e8b3d5e7f7fc4c81e23afd8c138b930f4f3ccf9a41a23e9e4/rpds_py-0.22.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e3fb866d9932a3d7d0c82da76d816996d1667c44891bd861a0f97ba27e84fc74", size = 583554 }, + { url = "https://files.pythonhosted.org/packages/d0/08/696c9872cf56effdad9ed617ac072f6774a898d46b8b8964eab39ec562d2/rpds_py-0.22.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1352ae4f7c717ae8cba93421a63373e582d19d55d2ee2cbb184344c82d2ae55a", size = 552105 }, + { url = 
"https://files.pythonhosted.org/packages/18/1f/4df560be1e994f5adf56cabd6c117e02de7c88ee238bb4ce03ed50da9d56/rpds_py-0.22.3-cp311-cp311-win32.whl", hash = "sha256:b0b4136a252cadfa1adb705bb81524eee47d9f6aab4f2ee4fa1e9d3cd4581f64", size = 220199 }, + { url = "https://files.pythonhosted.org/packages/b8/1b/c29b570bc5db8237553002788dc734d6bd71443a2ceac2a58202ec06ef12/rpds_py-0.22.3-cp311-cp311-win_amd64.whl", hash = "sha256:8bd7c8cfc0b8247c8799080fbff54e0b9619e17cdfeb0478ba7295d43f635d7c", size = 231775 }, + { url = "https://files.pythonhosted.org/packages/75/47/3383ee3bd787a2a5e65a9b9edc37ccf8505c0a00170e3a5e6ea5fbcd97f7/rpds_py-0.22.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:27e98004595899949bd7a7b34e91fa7c44d7a97c40fcaf1d874168bb652ec67e", size = 352334 }, + { url = "https://files.pythonhosted.org/packages/40/14/aa6400fa8158b90a5a250a77f2077c0d0cd8a76fce31d9f2b289f04c6dec/rpds_py-0.22.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1978d0021e943aae58b9b0b196fb4895a25cc53d3956b8e35e0b7682eefb6d56", size = 342111 }, + { url = "https://files.pythonhosted.org/packages/7d/06/395a13bfaa8a28b302fb433fb285a67ce0ea2004959a027aea8f9c52bad4/rpds_py-0.22.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655ca44a831ecb238d124e0402d98f6212ac527a0ba6c55ca26f616604e60a45", size = 384286 }, + { url = "https://files.pythonhosted.org/packages/43/52/d8eeaffab047e6b7b7ef7f00d5ead074a07973968ffa2d5820fa131d7852/rpds_py-0.22.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:feea821ee2a9273771bae61194004ee2fc33f8ec7db08117ef9147d4bbcbca8e", size = 391739 }, + { url = "https://files.pythonhosted.org/packages/83/31/52dc4bde85c60b63719610ed6f6d61877effdb5113a72007679b786377b8/rpds_py-0.22.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22bebe05a9ffc70ebfa127efbc429bc26ec9e9b4ee4d15a740033efda515cf3d", size = 427306 }, + { url = 
"https://files.pythonhosted.org/packages/70/d5/1bab8e389c2261dba1764e9e793ed6830a63f830fdbec581a242c7c46bda/rpds_py-0.22.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af6e48651c4e0d2d166dc1b033b7042ea3f871504b6805ba5f4fe31581d8d38", size = 442717 }, + { url = "https://files.pythonhosted.org/packages/82/a1/a45f3e30835b553379b3a56ea6c4eb622cf11e72008229af840e4596a8ea/rpds_py-0.22.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ba3c290821343c192f7eae1d8fd5999ca2dc99994114643e2f2d3e6138b15", size = 385721 }, + { url = "https://files.pythonhosted.org/packages/a6/27/780c942de3120bdd4d0e69583f9c96e179dfff082f6ecbb46b8d6488841f/rpds_py-0.22.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02fbb9c288ae08bcb34fb41d516d5eeb0455ac35b5512d03181d755d80810059", size = 415824 }, + { url = "https://files.pythonhosted.org/packages/94/0b/aa0542ca88ad20ea719b06520f925bae348ea5c1fdf201b7e7202d20871d/rpds_py-0.22.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f56a6b404f74ab372da986d240e2e002769a7d7102cc73eb238a4f72eec5284e", size = 561227 }, + { url = "https://files.pythonhosted.org/packages/0d/92/3ed77d215f82c8f844d7f98929d56cc321bb0bcfaf8f166559b8ec56e5f1/rpds_py-0.22.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0a0461200769ab3b9ab7e513f6013b7a97fdeee41c29b9db343f3c5a8e2b9e61", size = 587424 }, + { url = "https://files.pythonhosted.org/packages/09/42/cacaeb047a22cab6241f107644f230e2935d4efecf6488859a7dd82fc47d/rpds_py-0.22.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8633e471c6207a039eff6aa116e35f69f3156b3989ea3e2d755f7bc41754a4a7", size = 555953 }, + { url = "https://files.pythonhosted.org/packages/e6/52/c921dc6d5f5d45b212a456c1f5b17df1a471127e8037eb0972379e39dff4/rpds_py-0.22.3-cp312-cp312-win32.whl", hash = "sha256:593eba61ba0c3baae5bc9be2f5232430453fb4432048de28399ca7376de9c627", size = 221339 }, + { url = 
"https://files.pythonhosted.org/packages/f2/c7/f82b5be1e8456600395366f86104d1bd8d0faed3802ad511ef6d60c30d98/rpds_py-0.22.3-cp312-cp312-win_amd64.whl", hash = "sha256:d115bffdd417c6d806ea9069237a4ae02f513b778e3789a359bc5856e0404cc4", size = 235786 }, + { url = "https://files.pythonhosted.org/packages/8b/63/e29f8ee14fcf383574f73b6bbdcbec0fbc2e5fc36b4de44d1ac389b1de62/rpds_py-0.22.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d48424e39c2611ee1b84ad0f44fb3b2b53d473e65de061e3f460fc0be5f1939d", size = 360786 }, + { url = "https://files.pythonhosted.org/packages/d3/e0/771ee28b02a24e81c8c0e645796a371350a2bb6672753144f36ae2d2afc9/rpds_py-0.22.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:24e8abb5878e250f2eb0d7859a8e561846f98910326d06c0d51381fed59357bd", size = 350589 }, + { url = "https://files.pythonhosted.org/packages/cf/49/abad4c4a1e6f3adf04785a99c247bfabe55ed868133e2d1881200aa5d381/rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b232061ca880db21fa14defe219840ad9b74b6158adb52ddf0e87bead9e8493", size = 381848 }, + { url = "https://files.pythonhosted.org/packages/3a/7d/f4bc6d6fbe6af7a0d2b5f2ee77079efef7c8528712745659ec0026888998/rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac0a03221cdb5058ce0167ecc92a8c89e8d0decdc9e99a2ec23380793c4dcb96", size = 387879 }, + { url = "https://files.pythonhosted.org/packages/13/b0/575c797377fdcd26cedbb00a3324232e4cb2c5d121f6e4b0dbf8468b12ef/rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb0c341fa71df5a4595f9501df4ac5abfb5a09580081dffbd1ddd4654e6e9123", size = 423916 }, + { url = "https://files.pythonhosted.org/packages/54/78/87157fa39d58f32a68d3326f8a81ad8fb99f49fe2aa7ad9a1b7d544f9478/rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf9db5488121b596dbfc6718c76092fda77b703c1f7533a226a5a9f65248f8ad", size = 
448410 }, + { url = "https://files.pythonhosted.org/packages/59/69/860f89996065a88be1b6ff2d60e96a02b920a262d8aadab99e7903986597/rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8db6b5b2d4491ad5b6bdc2bc7c017eec108acbf4e6785f42a9eb0ba234f4c9", size = 382841 }, + { url = "https://files.pythonhosted.org/packages/bd/d7/bc144e10d27e3cb350f98df2492a319edd3caaf52ddfe1293f37a9afbfd7/rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b3d504047aba448d70cf6fa22e06cb09f7cbd761939fdd47604f5e007675c24e", size = 409662 }, + { url = "https://files.pythonhosted.org/packages/14/2a/6bed0b05233c291a94c7e89bc76ffa1c619d4e1979fbfe5d96024020c1fb/rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e61b02c3f7a1e0b75e20c3978f7135fd13cb6cf551bf4a6d29b999a88830a338", size = 558221 }, + { url = "https://files.pythonhosted.org/packages/11/23/cd8f566de444a137bc1ee5795e47069a947e60810ba4152886fe5308e1b7/rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e35ba67d65d49080e8e5a1dd40101fccdd9798adb9b050ff670b7d74fa41c566", size = 583780 }, + { url = "https://files.pythonhosted.org/packages/8d/63/79c3602afd14d501f751e615a74a59040328da5ef29ed5754ae80d236b84/rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:26fd7cac7dd51011a245f29a2cc6489c4608b5a8ce8d75661bb4a1066c52dfbe", size = 553619 }, + { url = "https://files.pythonhosted.org/packages/9f/2e/c5c1689e80298d4e94c75b70faada4c25445739d91b94c211244a3ed7ed1/rpds_py-0.22.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:177c7c0fce2855833819c98e43c262007f42ce86651ffbb84f37883308cb0e7d", size = 233338 }, ] [[package]] @@ -4278,7 +4257,7 @@ wheels = [ [[package]] name = "selenium" -version = "4.25.0" +version = "4.27.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, @@ -4288,9 +4267,9 @@ dependencies = [ { name = "urllib3", extra = ["socks"] }, { name = 
"websocket-client" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0e/5a/d3735b189b91715fd0f5a9b8d55e2605061309849470e96ab830f02cba40/selenium-4.25.0.tar.gz", hash = "sha256:95d08d3b82fb353f3c474895154516604c7f0e6a9a565ae6498ef36c9bac6921", size = 957765 } +sdist = { url = "https://files.pythonhosted.org/packages/44/8c/62c47c91072aa03af1c3b7d7f1c59b987db41c9fec0f158fb03a0da51aa6/selenium-4.27.1.tar.gz", hash = "sha256:5296c425a75ff1b44d0d5199042b36a6d1ef76c04fb775b97b40be739a9caae2", size = 973526 } wheels = [ - { url = "https://files.pythonhosted.org/packages/aa/85/fa44f23dd5d5066a72f7c4304cce4b5ff9a6e7fd92431a48b2c63fbf63ec/selenium-4.25.0-py3-none-any.whl", hash = "sha256:3798d2d12b4a570bc5790163ba57fef10b2afee958bf1d80f2a3cf07c4141f33", size = 9693127 }, + { url = "https://files.pythonhosted.org/packages/a6/1e/5f1a5dd2a28528c4b3ec6e076b58e4c035810c805328f9936123283ca14e/selenium-4.27.1-py3-none-any.whl", hash = "sha256:b89b1f62b5cfe8025868556fe82360d6b649d464f75d2655cb966c8f8447ea18", size = 9707007 }, ] [[package]] @@ -4665,7 +4644,7 @@ wheels = [ [[package]] name = "textual" -version = "0.85.0" +version = "1.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py", extra = ["linkify", "plugins"] }, @@ -4673,14 +4652,14 @@ dependencies = [ { name = "rich" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f0/ef/d498d5eb07ebe63299517bbee7e4be2fe8e1b4f0835763446cef1c4eaed0/textual-0.85.0.tar.gz", hash = "sha256:645c0fd0b4f61cd19383df78a1acd4f3b555e2c514cfa2f454e20692dffc10a0", size = 1461202 } +sdist = { url = "https://files.pythonhosted.org/packages/1f/b6/59b1de04bb4dca0f21ed7ba0b19309ed7f3f5de4396edf20cc2855e53085/textual-1.0.0.tar.gz", hash = "sha256:bec9fe63547c1c552569d1b75d309038b7d456c03f86dfa3706ddb099b151399", size = 1532733 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/9c/d5/0f35e93d1343fd8a4a1571c104dd6f0a9d038aa89d203146f22b9beed725/textual-0.85.0-py3-none-any.whl", hash = "sha256:8e75d023f06b242fb88233926dfb7801792f867643493096dd45dd216dc950f3", size = 614318 }, + { url = "https://files.pythonhosted.org/packages/ac/bb/5fb6656c625019cd653d5215237d7cd6e0b12e7eae4195c3d1c91b2136fc/textual-1.0.0-py3-none-any.whl", hash = "sha256:2d4a701781c05104925e463ae370c630567c70c2880e92ab838052e3e23c986f", size = 660456 }, ] [[package]] name = "textual-dev" -version = "1.6.1" +version = "1.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -4690,9 +4669,9 @@ dependencies = [ { name = "textual-serve" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ff/85/93a28974fd75aa941b0aac415fc430bf693a91b219a25dc9447f1bd83338/textual_dev-1.6.1.tar.gz", hash = "sha256:0d0d4523a09566bae56eb9ebc4fcbb09069d0f335448e6b9b10dd2d805606bd8", size = 25624 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d3/ed0b20f6de0af1b7062c402d59d256029c0daa055ad9e04c27471b450cdd/textual_dev-1.7.0.tar.gz", hash = "sha256:bf1a50eaaff4cd6a863535dd53f06dbbd62617c371604f66f56de3908220ccd5", size = 25935 } wheels = [ - { url = "https://files.pythonhosted.org/packages/6b/aa/c89ce57be40847eebab57184a7223735ac56ee2063400c363a74d5e7a18e/textual_dev-1.6.1-py3-none-any.whl", hash = "sha256:de93279da6dd0772be88a83e494be1bc895df0a0c3e47bcd48fa1acb1a83a34b", size = 26853 }, + { url = "https://files.pythonhosted.org/packages/50/4b/3c1eb9cbc39f2f28d27e10ef2fe42bfe0cf3c2f8445a454c124948d6169b/textual_dev-1.7.0-py3-none-any.whl", hash = "sha256:a93a846aeb6a06edb7808504d9c301565f7f4bf2e7046d56583ed755af356c8d", size = 27221 }, ] [[package]] @@ -4763,7 +4742,7 @@ sdist = { url = "https://files.pythonhosted.org/packages/17/82/86982e4b6d16e4feb [[package]] name = "tldextract" -version = "5.1.2" +version = "5.1.3" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -4771,9 +4750,9 @@ dependencies = [ { name = "requests" }, { name = "requests-file" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/db/ed/c92a5d6edaafec52f388c2d2946b4664294299cebf52bb1ef9cbc44ae739/tldextract-5.1.2.tar.gz", hash = "sha256:c9e17f756f05afb5abac04fe8f766e7e70f9fe387adb1859f0f52408ee060200", size = 116825 } +sdist = { url = "https://files.pythonhosted.org/packages/4a/4f/eee4bebcbad25a798bf55601d3a4aee52003bebcf9e55fce08b91ca541a9/tldextract-5.1.3.tar.gz", hash = "sha256:d43c7284c23f5dc8a42fd0fee2abede2ff74cc622674e4cb07f514ab3330c338", size = 125033 } wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/6d/8eaafb735b39c4ab3bb8fe4324ef8f0f0af27a7df9bb4cd503927bd5475d/tldextract-5.1.2-py3-none-any.whl", hash = "sha256:4dfc4c277b6b97fa053899fcdb892d2dc27295851ab5fac4e07797b6a21b2e46", size = 97560 }, + { url = "https://files.pythonhosted.org/packages/c6/86/aebe15fa40a992c446be5cf14e70e58a251277494c14d26bdbcff0e658fd/tldextract-5.1.3-py3-none-any.whl", hash = "sha256:78de310cc2ca018692de5ddf320f9d6bd7c5cf857d0fd4f2175f0cdf4440ea75", size = 104923 }, ] [[package]] @@ -4846,20 +4825,20 @@ wheels = [ [[package]] name = "tornado" -version = "6.4.1" +version = "6.4.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ee/66/398ac7167f1c7835406888a386f6d0d26ee5dbf197d8a571300be57662d3/tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9", size = 500623 } +sdist = { url = "https://files.pythonhosted.org/packages/59/45/a0daf161f7d6f36c3ea5fc0c2de619746cc3dd4c76402e9db545bd920f63/tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b", size = 501135 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/00/d9/c33be3c1a7564f7d42d87a8d186371a75fd142097076767a5c27da941fef/tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8", size = 435924 }, - { url = "https://files.pythonhosted.org/packages/2e/0f/721e113a2fac2f1d7d124b3279a1da4c77622e104084f56119875019ffab/tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14", size = 433883 }, - { url = "https://files.pythonhosted.org/packages/13/cf/786b8f1e6fe1c7c675e79657448178ad65e41c1c9765ef82e7f6f765c4c5/tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4", size = 437224 }, - { url = "https://files.pythonhosted.org/packages/e4/8e/a6ce4b8d5935558828b0f30f3afcb2d980566718837b3365d98e34f6067e/tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842", size = 436597 }, - { url = "https://files.pythonhosted.org/packages/22/d4/54f9d12668b58336bd30defe0307e6c61589a3e687b05c366f804b7faaf0/tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3", size = 436797 }, - { url = "https://files.pythonhosted.org/packages/cf/3f/2c792e7afa7dd8b24fad7a2ed3c2f24a5ec5110c7b43a64cb6095cc106b8/tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f", size = 437516 }, - { url = "https://files.pythonhosted.org/packages/71/63/c8fc62745e669ac9009044b889fc531b6f88ac0f5f183cac79eaa950bb23/tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4", size = 436958 
}, - { url = "https://files.pythonhosted.org/packages/94/d4/f8ac1f5bd22c15fad3b527e025ce219bd526acdbd903f52053df2baecc8b/tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698", size = 436882 }, - { url = "https://files.pythonhosted.org/packages/4b/3e/a8124c21cc0bbf144d7903d2a0cadab15cadaf683fa39a0f92bc567f0d4d/tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d", size = 438092 }, - { url = "https://files.pythonhosted.org/packages/d9/2f/3f2f05e84a7aff787a96d5fb06821323feb370fe0baed4db6ea7b1088f32/tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7", size = 438532 }, + { url = "https://files.pythonhosted.org/packages/26/7e/71f604d8cea1b58f82ba3590290b66da1e72d840aeb37e0d5f7291bd30db/tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1", size = 436299 }, + { url = "https://files.pythonhosted.org/packages/96/44/87543a3b99016d0bf54fdaab30d24bf0af2e848f1d13d34a3a5380aabe16/tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803", size = 434253 }, + { url = "https://files.pythonhosted.org/packages/cb/fb/fdf679b4ce51bcb7210801ef4f11fdac96e9885daa402861751353beea6e/tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec", size = 437602 }, + { url = "https://files.pythonhosted.org/packages/4f/3b/e31aeffffc22b475a64dbeb273026a21b5b566f74dee48742817626c47dc/tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946", size = 436972 }, + { url = 
"https://files.pythonhosted.org/packages/22/55/b78a464de78051a30599ceb6983b01d8f732e6f69bf37b4ed07f642ac0fc/tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf", size = 437173 }, + { url = "https://files.pythonhosted.org/packages/79/5e/be4fb0d1684eb822c9a62fb18a3e44a06188f78aa466b2ad991d2ee31104/tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634", size = 437892 }, + { url = "https://files.pythonhosted.org/packages/f5/33/4f91fdd94ea36e1d796147003b490fe60a0215ac5737b6f9c65e160d4fe0/tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73", size = 437334 }, + { url = "https://files.pythonhosted.org/packages/2b/ae/c1b22d4524b0e10da2f29a176fb2890386f7bd1f63aacf186444873a88a0/tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c", size = 437261 }, + { url = "https://files.pythonhosted.org/packages/b5/25/36dbd49ab6d179bcfc4c6c093a51795a4f3bed380543a8242ac3517a1751/tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482", size = 438463 }, + { url = "https://files.pythonhosted.org/packages/61/cc/58b1adeb1bb46228442081e746fcdbc4540905c87e8add7c277540934edb/tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38", size = 438907 }, ] [[package]] @@ -4885,7 +4864,7 @@ wheels = [ [[package]] name = "trio" -version = "0.27.0" +version = "0.28.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -4896,9 +4875,9 @@ dependencies = [ { name = "sniffio" }, { name = "sortedcontainers" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/17/d1/a83dee5be404da7afe5a71783a33b8907bacb935a6dc8c69ab785e4a3eed/trio-0.27.0.tar.gz", hash = "sha256:1dcc95ab1726b2da054afea8fd761af74bad79bd52381b84eae408e983c76831", size = 568064 } +sdist = { url = "https://files.pythonhosted.org/packages/b3/73/57efab729506a8d4b89814f1e356ec8f3369de0ed4fd7e7616974d09646d/trio-0.28.0.tar.gz", hash = "sha256:4e547896fe9e8a5658e54e4c7c5fa1db748cbbbaa7c965e7d40505b928c73c05", size = 580318 } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/83/ec3196c360afffbc5b342ead48d1eb7393dd74fa70bca75d33905a86f211/trio-0.27.0-py3-none-any.whl", hash = "sha256:68eabbcf8f457d925df62da780eff15ff5dc68fd6b367e2dde59f7aaf2a0b884", size = 481734 }, + { url = "https://files.pythonhosted.org/packages/b4/04/9954a59e1fb6732f5436225c9af963811d7b24ea62a8bf96991f2cb8c26e/trio-0.28.0-py3-none-any.whl", hash = "sha256:56d58977acc1635735a96581ec70513cc781b8b6decd299c487d3be2a721cd94", size = 486317 }, ] [[package]] @@ -4945,24 +4924,24 @@ wheels = [ [[package]] name = "types-aiofiles" -version = "24.1.0.20240626" +version = "24.1.0.20241221" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/13/e9/013940b017c313c2e15c64017268fdb0c25e0638621fb8a5d9ebe00fb0f4/types-aiofiles-24.1.0.20240626.tar.gz", hash = "sha256:48604663e24bc2d5038eac05ccc33e75799b0779e93e13d6a8f711ddc306ac08", size = 9357 } +sdist = { url = "https://files.pythonhosted.org/packages/ab/5e/f984b9ddc7eecdf31e683e692d933f3672276ed95aad6adb9aea9ecbdc29/types_aiofiles-24.1.0.20241221.tar.gz", hash = "sha256:c40f6c290b0af9e902f7f3fa91213cf5bb67f37086fb21dc0ff458253586ad55", size = 14081 } wheels = [ - { url = "https://files.pythonhosted.org/packages/c3/ad/c4b3275d21c5be79487c4f6ed7cd13336997746fe099236cb29256a44a90/types_aiofiles-24.1.0.20240626-py3-none-any.whl", hash = "sha256:7939eca4a8b4f9c6491b6e8ef160caee9a21d32e18534a57d5ed90aee47c66b4", size = 9389 }, + { url = 
"https://files.pythonhosted.org/packages/ff/da/77902220df98ce920444cf3611fa0b1cf0dc2cfa5a137c55e93829aa458e/types_aiofiles-24.1.0.20241221-py3-none-any.whl", hash = "sha256:11d4e102af0627c02e8c1d17736caa3c39de1058bea37e2f4de6ef11a5b652ab", size = 14162 }, ] [[package]] name = "types-docker" -version = "7.1.0.20240827" +version = "7.1.0.20241229" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "types-requests" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3a/03/623a13f6747622caaa5f7faf628a13637be57c1416b3715dc33aadfe20b6/types-docker-7.1.0.20240827.tar.gz", hash = "sha256:3b0a02da44cffef176dfc4f8b2c4a502ee66e83d79c8527c0cf5806eb599cfac", size = 24137 } +sdist = { url = "https://files.pythonhosted.org/packages/00/4b/7ca6c1fe916ef4c71f145234902bb4da074e410d9cc0bd72572790c3f06d/types_docker-7.1.0.20241229.tar.gz", hash = "sha256:d968f164bb02f934bc2f178515dd4b3c8b2b4e371a9400ec440247c09c139545", size = 29032 } wheels = [ - { url = "https://files.pythonhosted.org/packages/02/72/3afbbfee55e264eb0917b04bcb5fc4790da57fd01191df5c461b28dc0e2f/types_docker-7.1.0.20240827-py3-none-any.whl", hash = "sha256:e71067745f0100bcc2c1a2fc794ea15204c916430b09e61bd6bddf1bf56e4343", size = 38888 }, + { url = "https://files.pythonhosted.org/packages/e4/32/8a1c95566816fef8f7b2407d25981cf0d3ecf2f226ed0ab3a34969994ab7/types_docker-7.1.0.20241229-py3-none-any.whl", hash = "sha256:b760745a6cb0351a19108c0b76e2a43ebc05a686f6c3ec9bc1a991ff9f1cc353", size = 43650 }, ] [[package]] @@ -4976,20 +4955,20 @@ wheels = [ [[package]] name = "types-protobuf" -version = "5.28.0.20240924" +version = "5.29.1.20241207" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/90/c3/217fe2c6a4b8ed75c5ecbd27ae8dedd7bc8e8728ac4b29d16005d3a3aba2/types-protobuf-5.28.0.20240924.tar.gz", hash = "sha256:d181af8a256e5a91ce8d5adb53496e880efd9144c7d54483e3653332b60296f0", size = 54324 } +sdist = { url = 
"https://files.pythonhosted.org/packages/70/89/b661a447139f665ccea8e39bfdd52a92f803df4b5de0e6001a3537feaacb/types_protobuf-5.29.1.20241207.tar.gz", hash = "sha256:2ebcadb8ab3ef2e3e2f067e0882906d64ba0dc65fc5b0fd7a8b692315b4a0be9", size = 59190 } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/2b/98bfe67a73b15964513b471ce10b610ab0df28825900e0e7517b2bf23952/types_protobuf-5.28.0.20240924-py3-none-any.whl", hash = "sha256:5cecf612ccdefb7dc95f7a51fb502902f20fc2e6681cd500184aaa1b3931d6a7", size = 68761 }, + { url = "https://files.pythonhosted.org/packages/7e/6e/cdf152187019d6f6d04066b23e48659d961b527e9c6d43b48459d160e332/types_protobuf-5.29.1.20241207-py3-none-any.whl", hash = "sha256:92893c42083e9b718c678badc0af7a9a1307b92afe1599e5cba5f3d35b668b2f", size = 73902 }, ] [[package]] name = "types-python-dateutil" -version = "2.9.0.20241003" +version = "2.9.0.20241206" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/31/f8/f6ee4c803a7beccffee21bb29a71573b39f7037c224843eff53e5308c16e/types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446", size = 9210 } +sdist = { url = "https://files.pythonhosted.org/packages/a9/60/47d92293d9bc521cd2301e423a358abfac0ad409b3a1606d8fbae1321961/types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb", size = 13802 } wheels = [ - { url = "https://files.pythonhosted.org/packages/35/d6/ba5f61958f358028f2e2ba1b8e225b8e263053bd57d3a79e2d2db64c807b/types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d", size = 9693 }, + { url = "https://files.pythonhosted.org/packages/0f/b3/ca41df24db5eb99b00d97f89d7674a90cb6b3134c52fb8121b6d8d30f15c/types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = 
"sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53", size = 14384 }, ] [[package]] @@ -5006,11 +4985,11 @@ wheels = [ [[package]] name = "types-tabulate" -version = "0.9.0.20240106" +version = "0.9.0.20241207" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cf/9d/65b82ce032fd1cc4df752461175a800c1cfc336461f07ceff10c6a5913eb/types-tabulate-0.9.0.20240106.tar.gz", hash = "sha256:c9b6db10dd7fcf55bd1712dd3537f86ddce72a08fd62bb1af4338c7096ce947e", size = 3442 } +sdist = { url = "https://files.pythonhosted.org/packages/3f/43/16030404a327e4ff8c692f2273854019ed36718667b2993609dc37d14dd4/types_tabulate-0.9.0.20241207.tar.gz", hash = "sha256:ac1ac174750c0a385dfd248edc6279fa328aaf4ea317915ab879a2ec47833230", size = 8195 } wheels = [ - { url = "https://files.pythonhosted.org/packages/f0/17/d53c0bb370100313df6800e9096bdfc27b32b8e4a9390bfb35bc4b17db78/types_tabulate-0.9.0.20240106-py3-none-any.whl", hash = "sha256:0378b7b6fe0ccb4986299496d027a6d4c218298ecad67199bbd0e2d7e9d335a1", size = 3350 }, + { url = "https://files.pythonhosted.org/packages/5e/86/a9ebfd509cbe74471106dffed320e208c72537f9aeb0a55eaa6b1b5e4d17/types_tabulate-0.9.0.20241207-py3-none-any.whl", hash = "sha256:b8dad1343c2a8ba5861c5441370c3e35908edd234ff036d4298708a1d4cf8a85", size = 8307 }, ] [[package]] @@ -5083,55 +5062,55 @@ wheels = [ [[package]] name = "watchfiles" -version = "0.24.0" +version = "1.0.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c8/27/2ba23c8cc85796e2d41976439b08d52f691655fdb9401362099502d1f0cf/watchfiles-0.24.0.tar.gz", hash = "sha256:afb72325b74fa7a428c009c1b8be4b4d7c2afedafb2982827ef2156646df2fe1", size = 37870 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/89/a1/631c12626378b9f1538664aa221feb5c60dfafbd7f60b451f8d0bdbcdedd/watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0", size = 375096 }, - { url = "https://files.pythonhosted.org/packages/f7/5c/f27c979c8a10aaa2822286c1bffdce3db731cd1aa4224b9f86623e94bbfe/watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c", size = 367425 }, - { url = "https://files.pythonhosted.org/packages/74/0d/1889e5649885484d29f6c792ef274454d0a26b20d6ed5fdba5409335ccb6/watchfiles-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82ae557a8c037c42a6ef26c494d0631cacca040934b101d001100ed93d43f361", size = 437705 }, - { url = "https://files.pythonhosted.org/packages/85/8a/01d9a22e839f0d1d547af11b1fcac6ba6f889513f1b2e6f221d9d60d9585/watchfiles-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acbfa31e315a8f14fe33e3542cbcafc55703b8f5dcbb7c1eecd30f141df50db3", size = 433636 }, - { url = "https://files.pythonhosted.org/packages/62/32/a93db78d340c7ef86cde469deb20e36c6b2a873edee81f610e94bbba4e06/watchfiles-0.24.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b74fdffce9dfcf2dc296dec8743e5b0332d15df19ae464f0e249aa871fc1c571", size = 451069 }, - { url = "https://files.pythonhosted.org/packages/99/c2/e9e2754fae3c2721c9a7736f92dab73723f1968ed72535fff29e70776008/watchfiles-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:449f43f49c8ddca87c6b3980c9284cab6bd1f5c9d9a2b00012adaaccd5e7decd", size = 469306 }, - { url = "https://files.pythonhosted.org/packages/4c/45/f317d9e3affb06c3c27c478de99f7110143e87f0f001f0f72e18d0e1ddce/watchfiles-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4abf4ad269856618f82dee296ac66b0cd1d71450fc3c98532d93798e73399b7a", size = 476187 }, - { url = 
"https://files.pythonhosted.org/packages/ac/d3/f1f37248abe0114916921e638f71c7d21fe77e3f2f61750e8057d0b68ef2/watchfiles-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f895d785eb6164678ff4bb5cc60c5996b3ee6df3edb28dcdeba86a13ea0465e", size = 425743 }, - { url = "https://files.pythonhosted.org/packages/2b/e8/c7037ea38d838fd81a59cd25761f106ee3ef2cfd3261787bee0c68908171/watchfiles-0.24.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ae3e208b31be8ce7f4c2c0034f33406dd24fbce3467f77223d10cd86778471c", size = 612327 }, - { url = "https://files.pythonhosted.org/packages/a0/c5/0e6e228aafe01a7995fbfd2a4edb221bb11a2744803b65a5663fb85e5063/watchfiles-0.24.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2efec17819b0046dde35d13fb8ac7a3ad877af41ae4640f4109d9154ed30a188", size = 595096 }, - { url = "https://files.pythonhosted.org/packages/63/d5/4780e8bf3de3b4b46e7428a29654f7dc041cad6b19fd86d083e4b6f64bbe/watchfiles-0.24.0-cp310-none-win32.whl", hash = "sha256:6bdcfa3cd6fdbdd1a068a52820f46a815401cbc2cb187dd006cb076675e7b735", size = 264149 }, - { url = "https://files.pythonhosted.org/packages/fe/1b/5148898ba55fc9c111a2a4a5fb67ad3fa7eb2b3d7f0618241ed88749313d/watchfiles-0.24.0-cp310-none-win_amd64.whl", hash = "sha256:54ca90a9ae6597ae6dc00e7ed0a040ef723f84ec517d3e7ce13e63e4bc82fa04", size = 277542 }, - { url = "https://files.pythonhosted.org/packages/85/02/366ae902cd81ca5befcd1854b5c7477b378f68861597cef854bd6dc69fbe/watchfiles-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bdcd5538e27f188dd3c804b4a8d5f52a7fc7f87e7fd6b374b8e36a4ca03db428", size = 375579 }, - { url = "https://files.pythonhosted.org/packages/bc/67/d8c9d256791fe312fea118a8a051411337c948101a24586e2df237507976/watchfiles-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2dadf8a8014fde6addfd3c379e6ed1a981c8f0a48292d662e27cabfe4239c83c", size = 367726 }, - { url = 
"https://files.pythonhosted.org/packages/b1/dc/a8427b21ef46386adf824a9fec4be9d16a475b850616cfd98cf09a97a2ef/watchfiles-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6509ed3f467b79d95fc62a98229f79b1a60d1b93f101e1c61d10c95a46a84f43", size = 437735 }, - { url = "https://files.pythonhosted.org/packages/3a/21/0b20bef581a9fbfef290a822c8be645432ceb05fb0741bf3c032e0d90d9a/watchfiles-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8360f7314a070c30e4c976b183d1d8d1585a4a50c5cb603f431cebcbb4f66327", size = 433644 }, - { url = "https://files.pythonhosted.org/packages/1c/e8/d5e5f71cc443c85a72e70b24269a30e529227986096abe091040d6358ea9/watchfiles-0.24.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:316449aefacf40147a9efaf3bd7c9bdd35aaba9ac5d708bd1eb5763c9a02bef5", size = 450928 }, - { url = "https://files.pythonhosted.org/packages/61/ee/bf17f5a370c2fcff49e1fec987a6a43fd798d8427ea754ce45b38f9e117a/watchfiles-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73bde715f940bea845a95247ea3e5eb17769ba1010efdc938ffcb967c634fa61", size = 469072 }, - { url = "https://files.pythonhosted.org/packages/a3/34/03b66d425986de3fc6077e74a74c78da298f8cb598887f664a4485e55543/watchfiles-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3770e260b18e7f4e576edca4c0a639f704088602e0bc921c5c2e721e3acb8d15", size = 475517 }, - { url = "https://files.pythonhosted.org/packages/70/eb/82f089c4f44b3171ad87a1b433abb4696f18eb67292909630d886e073abe/watchfiles-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa0fd7248cf533c259e59dc593a60973a73e881162b1a2f73360547132742823", size = 425480 }, - { url = "https://files.pythonhosted.org/packages/53/20/20509c8f5291e14e8a13104b1808cd7cf5c44acd5feaecb427a49d387774/watchfiles-0.24.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:d7a2e3b7f5703ffbd500dabdefcbc9eafeff4b9444bbdd5d83d79eedf8428fab", size = 612322 }, - { url = "https://files.pythonhosted.org/packages/df/2b/5f65014a8cecc0a120f5587722068a975a692cadbe9fe4ea56b3d8e43f14/watchfiles-0.24.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d831ee0a50946d24a53821819b2327d5751b0c938b12c0653ea5be7dea9c82ec", size = 595094 }, - { url = "https://files.pythonhosted.org/packages/18/98/006d8043a82c0a09d282d669c88e587b3a05cabdd7f4900e402250a249ac/watchfiles-0.24.0-cp311-none-win32.whl", hash = "sha256:49d617df841a63b4445790a254013aea2120357ccacbed00253f9c2b5dc24e2d", size = 264191 }, - { url = "https://files.pythonhosted.org/packages/8a/8b/badd9247d6ec25f5f634a9b3d0d92e39c045824ec7e8afcedca8ee52c1e2/watchfiles-0.24.0-cp311-none-win_amd64.whl", hash = "sha256:d3dcb774e3568477275cc76554b5a565024b8ba3a0322f77c246bc7111c5bb9c", size = 277527 }, - { url = "https://files.pythonhosted.org/packages/af/19/35c957c84ee69d904299a38bae3614f7cede45f07f174f6d5a2f4dbd6033/watchfiles-0.24.0-cp311-none-win_arm64.whl", hash = "sha256:9301c689051a4857d5b10777da23fafb8e8e921bcf3abe6448a058d27fb67633", size = 266253 }, - { url = "https://files.pythonhosted.org/packages/35/82/92a7bb6dc82d183e304a5f84ae5437b59ee72d48cee805a9adda2488b237/watchfiles-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7211b463695d1e995ca3feb38b69227e46dbd03947172585ecb0588f19b0d87a", size = 374137 }, - { url = "https://files.pythonhosted.org/packages/87/91/49e9a497ddaf4da5e3802d51ed67ff33024597c28f652b8ab1e7c0f5718b/watchfiles-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b8693502d1967b00f2fb82fc1e744df128ba22f530e15b763c8d82baee15370", size = 367733 }, - { url = "https://files.pythonhosted.org/packages/0d/d8/90eb950ab4998effea2df4cf3a705dc594f6bc501c5a353073aa990be965/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdab9555053399318b953a1fe1f586e945bc8d635ce9d05e617fd9fe3a4687d6", size = 437322 }, - { url = 
"https://files.pythonhosted.org/packages/6c/a2/300b22e7bc2a222dd91fce121cefa7b49aa0d26a627b2777e7bdfcf1110b/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34e19e56d68b0dad5cff62273107cf5d9fbaf9d75c46277aa5d803b3ef8a9e9b", size = 433409 }, - { url = "https://files.pythonhosted.org/packages/99/44/27d7708a43538ed6c26708bcccdde757da8b7efb93f4871d4cc39cffa1cc/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41face41f036fee09eba33a5b53a73e9a43d5cb2c53dad8e61fa6c9f91b5a51e", size = 452142 }, - { url = "https://files.pythonhosted.org/packages/b0/ec/c4e04f755be003129a2c5f3520d2c47026f00da5ecb9ef1e4f9449637571/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5148c2f1ea043db13ce9b0c28456e18ecc8f14f41325aa624314095b6aa2e9ea", size = 469414 }, - { url = "https://files.pythonhosted.org/packages/c5/4e/cdd7de3e7ac6432b0abf282ec4c1a1a2ec62dfe423cf269b86861667752d/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e4bd963a935aaf40b625c2499f3f4f6bbd0c3776f6d3bc7c853d04824ff1c9f", size = 472962 }, - { url = "https://files.pythonhosted.org/packages/27/69/e1da9d34da7fc59db358424f5d89a56aaafe09f6961b64e36457a80a7194/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c79d7719d027b7a42817c5d96461a99b6a49979c143839fc37aa5748c322f234", size = 425705 }, - { url = "https://files.pythonhosted.org/packages/e8/c1/24d0f7357be89be4a43e0a656259676ea3d7a074901f47022f32e2957798/watchfiles-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:32aa53a9a63b7f01ed32e316e354e81e9da0e6267435c7243bf8ae0f10b428ef", size = 612851 }, - { url = "https://files.pythonhosted.org/packages/c7/af/175ba9b268dec56f821639c9893b506c69fd999fe6a2e2c51de420eb2f01/watchfiles-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce72dba6a20e39a0c628258b5c308779b8697f7676c254a845715e2a1039b968", 
size = 594868 }, - { url = "https://files.pythonhosted.org/packages/44/81/1f701323a9f70805bc81c74c990137123344a80ea23ab9504a99492907f8/watchfiles-0.24.0-cp312-none-win32.whl", hash = "sha256:d9018153cf57fc302a2a34cb7564870b859ed9a732d16b41a9b5cb2ebed2d444", size = 264109 }, - { url = "https://files.pythonhosted.org/packages/b4/0b/32cde5bc2ebd9f351be326837c61bdeb05ad652b793f25c91cac0b48a60b/watchfiles-0.24.0-cp312-none-win_amd64.whl", hash = "sha256:551ec3ee2a3ac9cbcf48a4ec76e42c2ef938a7e905a35b42a1267fa4b1645896", size = 277055 }, - { url = "https://files.pythonhosted.org/packages/4b/81/daade76ce33d21dbec7a15afd7479de8db786e5f7b7d249263b4ea174e08/watchfiles-0.24.0-cp312-none-win_arm64.whl", hash = "sha256:b52a65e4ea43c6d149c5f8ddb0bef8d4a1e779b77591a458a893eb416624a418", size = 266169 }, - { url = "https://files.pythonhosted.org/packages/df/94/1ad200e937ec91b2a9d6b39ae1cf9c2b1a9cc88d5ceb43aa5c6962eb3c11/watchfiles-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632676574429bee8c26be8af52af20e0c718cc7f5f67f3fb658c71928ccd4f7f", size = 376986 }, - { url = "https://files.pythonhosted.org/packages/ee/fd/d9e020d687ccf90fe95efc513fbb39a8049cf5a3ff51f53c59fcf4c47a5d/watchfiles-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a2a9891723a735d3e2540651184be6fd5b96880c08ffe1a98bae5017e65b544b", size = 369445 }, - { url = "https://files.pythonhosted.org/packages/43/cb/c0279b35053555d10ef03559c5aebfcb0c703d9c70a7b4e532df74b9b0e8/watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7fa2bc0efef3e209a8199fd111b8969fe9db9c711acc46636686331eda7dd4", size = 439383 }, - { url = "https://files.pythonhosted.org/packages/8b/c4/08b3c2cda45db5169148a981c2100c744a4a222fa7ae7644937c0c002069/watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01550ccf1d0aed6ea375ef259706af76ad009ef5b0203a3a4cce0f6024f9b68a", size = 426804 }, +sdist = { url = 
"https://files.pythonhosted.org/packages/3c/7e/4569184ea04b501840771b8fcecee19b2233a8b72c196061263c0ef23c0b/watchfiles-1.0.3.tar.gz", hash = "sha256:f3ff7da165c99a5412fe5dd2304dd2dbaaaa5da718aad942dcb3a178eaa70c56", size = 38185 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/6c/7be04641c81209ea281b83b1174aa9d5ba53bec2a896d75a6b10428b4063/watchfiles-1.0.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:1da46bb1eefb5a37a8fb6fd52ad5d14822d67c498d99bda8754222396164ae42", size = 395213 }, + { url = "https://files.pythonhosted.org/packages/bd/d6/99438baa225891bda882adefefc14c9023ef3cdaf9772cd47973bb566e96/watchfiles-1.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2b961b86cd3973f5822826017cad7f5a75795168cb645c3a6b30c349094e02e3", size = 384755 }, + { url = "https://files.pythonhosted.org/packages/88/93/b10295ce8696e5e37f480ba4ae89e387e88ba425d72808c87d30f4cdefb1/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34e87c7b3464d02af87f1059fedda5484e43b153ef519e4085fe1a03dd94801e", size = 441701 }, + { url = "https://files.pythonhosted.org/packages/c5/3a/0359b7bddb1b7cbe6fb7096805b6e2f859f0de3d6130dcab9ac635db87e2/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d9dd2b89a16cf7ab9c1170b5863e68de6bf83db51544875b25a5f05a7269e678", size = 447540 }, + { url = "https://files.pythonhosted.org/packages/e2/a7/3400b4f105c68804495b76398165ffe6c00af93eab395279285f43cd0e42/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b4691234d31686dca133c920f94e478b548a8e7c750f28dbbc2e4333e0d3da9", size = 472467 }, + { url = "https://files.pythonhosted.org/packages/c3/1a/8f928800d038d4fdb1e9df6e0c380c8cee17e6fb180e1faceb3f94de6df7/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90b0fe1fcea9bd6e3084b44875e179b4adcc4057a3b81402658d0eb58c98edf8", size = 494467 }, + { url = 
"https://files.pythonhosted.org/packages/13/70/af75edf5b763f09e31a0f19ce045f3731db22599cb521807760b7d82b196/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b90651b4cf9e158d01faa0833b073e2e37719264bcee3eac49fc3c74e7d304b", size = 492671 }, + { url = "https://files.pythonhosted.org/packages/4a/6e/8723f4b0967cc8d94f33fc531c33d66b596090b024f449983d3a8d97cfca/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2e9fe695ff151b42ab06501820f40d01310fbd58ba24da8923ace79cf6d702d", size = 443811 }, + { url = "https://files.pythonhosted.org/packages/ee/5d/f3ca68a71d978d43168a65a1b4e1f72290c5350379aa148917e4ed0b2c46/watchfiles-1.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62691f1c0894b001c7cde1195c03b7801aaa794a837bd6eef24da87d1542838d", size = 615477 }, + { url = "https://files.pythonhosted.org/packages/0d/d0/3d27a26f276ef07ca4cd3c6766684444317ddd147943e00bdb157cfdf3c3/watchfiles-1.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:275c1b0e942d335fccb6014d79267d1b9fa45b5ac0639c297f1e856f2f532552", size = 614237 }, + { url = "https://files.pythonhosted.org/packages/97/e9/ff30b210099d75cfa407924b3c265d3054f14b83ddf02072bd637394aab6/watchfiles-1.0.3-cp310-cp310-win32.whl", hash = "sha256:06ce08549e49ba69ccc36fc5659a3d0ff4e3a07d542b895b8a9013fcab46c2dc", size = 270798 }, + { url = "https://files.pythonhosted.org/packages/ed/86/694f07eb91d3e81a359661b48ff6984543e50be767c50c08196155d417bf/watchfiles-1.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:f280b02827adc9d87f764972fbeb701cf5611f80b619c20568e1982a277d6146", size = 284192 }, + { url = "https://files.pythonhosted.org/packages/24/a8/06e2d5f840b285718a09be7c71ea19b7177b005cec87b8923dd7e8541b20/watchfiles-1.0.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ffe709b1d0bc2e9921257569675674cafb3a5f8af689ab9f3f2b3f88775b960f", size = 394821 }, + { url = 
"https://files.pythonhosted.org/packages/57/9f/f98a57ada3d4b1fcd0e325aa6c307e2248ecb048f71c96fba34a602f02e7/watchfiles-1.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:418c5ce332f74939ff60691e5293e27c206c8164ce2b8ce0d9abf013003fb7fe", size = 384898 }, + { url = "https://files.pythonhosted.org/packages/a3/31/33ba914010cbfd01033ca3727aff6585b6b2ea2b051b6fbaecdf4e2160b9/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f492d2907263d6d0d52f897a68647195bc093dafed14508a8d6817973586b6b", size = 441710 }, + { url = "https://files.pythonhosted.org/packages/d9/dd/e56b2ef07c2c34e4152950f0ce98a1081215ef027cf39e5dab61a0f8bd95/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48c9f3bc90c556a854f4cab6a79c16974099ccfa3e3e150673d82d47a4bc92c9", size = 447681 }, + { url = "https://files.pythonhosted.org/packages/60/8f/3837df33f3d0cbef8ae59559891d688490bf2960373ea077ff11cbf79115/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75d3bcfa90454dba8df12adc86b13b6d85fda97d90e708efc036c2760cc6ba44", size = 472312 }, + { url = "https://files.pythonhosted.org/packages/5a/b3/95d103e5bb609b20f175e8acdf8b32c4b091f96f781c066fd3bff2b17778/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5691340f259b8f76b45fb31b98e594d46c36d1dc8285efa7975f7f50230c9093", size = 494779 }, + { url = "https://files.pythonhosted.org/packages/4f/f0/9fdc60daf5abf7b0deb225c9b0a37fd72dc407fa33f071ae2f70e84e268c/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e263cc718545b7f897baeac1f00299ab6fabe3e18caaacacb0edf6d5f35513c", size = 492090 }, + { url = "https://files.pythonhosted.org/packages/96/e5/a9967e77f173280ab1abbfd7ead90f2b94060574968baf5e6d7cbe9dd490/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1c6cf7709ed3e55704cc06f6e835bf43c03bc8e3cb8ff946bf69a2e0a78d9d77", size = 443713 }, + { url = "https://files.pythonhosted.org/packages/60/38/e5390d4633a558878113e45d32e39d30cf58eb94e0359f41737be209321b/watchfiles-1.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:703aa5e50e465be901e0e0f9d5739add15e696d8c26c53bc6fc00eb65d7b9469", size = 615306 }, + { url = "https://files.pythonhosted.org/packages/5c/27/8a1ee74544c93e3242ca073087b45c64367aeb6897b622e43c8172c2b421/watchfiles-1.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bfcae6aecd9e0cb425f5145afee871465b98b75862e038d42fe91fd753ddd780", size = 614333 }, + { url = "https://files.pythonhosted.org/packages/fc/f8/25698f5b734907662b50acf3e81996053abdfe26fcf38804d028412876a8/watchfiles-1.0.3-cp311-cp311-win32.whl", hash = "sha256:6a76494d2c5311584f22416c5a87c1e2cb954ff9b5f0988027bc4ef2a8a67181", size = 270987 }, + { url = "https://files.pythonhosted.org/packages/39/78/f600dee7b387e6088c8d1f4c898a4340d07aecfe6406bd90ec4c1925ef08/watchfiles-1.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:cf745cbfad6389c0e331786e5fe9ae3f06e9d9c2ce2432378e1267954793975c", size = 284098 }, + { url = "https://files.pythonhosted.org/packages/ca/6f/27ba8aec0a4b45a6063454465eefb42777158081d9df18eab5f1d6a3bd8a/watchfiles-1.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:2dcc3f60c445f8ce14156854a072ceb36b83807ed803d37fdea2a50e898635d6", size = 276804 }, + { url = "https://files.pythonhosted.org/packages/bf/a9/c8b5ab33444306e1a324cb2b51644f8458dd459e30c3841f925012893e6a/watchfiles-1.0.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:93436ed550e429da007fbafb723e0769f25bae178fbb287a94cb4ccdf42d3af3", size = 391395 }, + { url = "https://files.pythonhosted.org/packages/ad/d3/403af5f07359863c03951796ddab265ee8cce1a6147510203d0bf43950e7/watchfiles-1.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c18f3502ad0737813c7dad70e3e1cc966cc147fbaeef47a09463bbffe70b0a00", size = 381432 }, + { url = 
"https://files.pythonhosted.org/packages/f6/5f/921f2f2beabaf24b1ad81ac22bb69df8dd5771fdb68d6f34a5912a420941/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a5bc3ca468bb58a2ef50441f953e1f77b9a61bd1b8c347c8223403dc9b4ac9a", size = 441448 }, + { url = "https://files.pythonhosted.org/packages/63/d7/67d0d750b246f248ccdb400a85a253e93e419ea5b6cbe968fa48b97a5f30/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0d1ec043f02ca04bf21b1b32cab155ce90c651aaf5540db8eb8ad7f7e645cba8", size = 446852 }, + { url = "https://files.pythonhosted.org/packages/53/7c/d7cd94c7d0905f1e2f1c2232ea9bc39b1a48affd007e09c547ead96edb8f/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f58d3bfafecf3d81c15d99fc0ecf4319e80ac712c77cf0ce2661c8cf8bf84066", size = 471662 }, + { url = "https://files.pythonhosted.org/packages/26/81/738f8e66f7525753996b8aa292f78dcec1ef77887d62e6cdfb04cc2f352f/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1df924ba82ae9e77340101c28d56cbaff2c991bd6fe8444a545d24075abb0a87", size = 493765 }, + { url = "https://files.pythonhosted.org/packages/d2/50/78e21f5da24ab39114e9b24f7b0945ea1c6fc7bc9ae86cd87f8eaeb47325/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:632a52dcaee44792d0965c17bdfe5dc0edad5b86d6a29e53d6ad4bf92dc0ff49", size = 490558 }, + { url = "https://files.pythonhosted.org/packages/a8/93/1873fea6354b2858eae8970991d64e9a449d87726d596490d46bf00af8ed/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bf4b459d94a0387617a1b499f314aa04d8a64b7a0747d15d425b8c8b151da0", size = 442808 }, + { url = "https://files.pythonhosted.org/packages/4f/b4/2fc4c92fb28b029f66d04a4d430fe929284e9ff717b04bb7a3bb8a7a5605/watchfiles-1.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:ca94c85911601b097d53caeeec30201736ad69a93f30d15672b967558df02885", size = 615287 }, + { url = "https://files.pythonhosted.org/packages/1e/d4/93da24db39257e440240d338b617c5153ad11d361c34108f5c0e1e0743eb/watchfiles-1.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:65ab1fb635476f6170b07e8e21db0424de94877e4b76b7feabfe11f9a5fc12b5", size = 612812 }, + { url = "https://files.pythonhosted.org/packages/c6/67/9fd3661c2dc0309abd6021876653d91e8b64fb279529e2cadaa3520ef3e3/watchfiles-1.0.3-cp312-cp312-win32.whl", hash = "sha256:49bc1bc26abf4f32e132652f4b3bfeec77d8f8f62f57652703ef127e85a3e38d", size = 271642 }, + { url = "https://files.pythonhosted.org/packages/ae/aa/8c887edb78cd67f5d4d6a35c3aeb46d748643ebf962163130fb1871e2ee0/watchfiles-1.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:48681c86f2cb08348631fed788a116c89c787fdf1e6381c5febafd782f6c3b44", size = 285505 }, + { url = "https://files.pythonhosted.org/packages/7b/31/d212fa6390f0e73a91913ada0b925b294a78d67794795371208baf73f0b5/watchfiles-1.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:9e080cf917b35b20c889225a13f290f2716748362f6071b859b60b8847a6aa43", size = 277263 }, + { url = "https://files.pythonhosted.org/packages/26/48/5a75b18ad40cc69ea6e0003bb748db162a3215bbe44a1293e073876d51bd/watchfiles-1.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:84fac88278f42d61c519a6c75fb5296fd56710b05bbdcc74bdf85db409a03780", size = 396233 }, + { url = "https://files.pythonhosted.org/packages/dc/b2/03ce3447a3271483b030b8bafc39be19739f9a4a23edec31c6688e8a066d/watchfiles-1.0.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:c68be72b1666d93b266714f2d4092d78dc53bd11cf91ed5a3c16527587a52e29", size = 386050 }, + { url = "https://files.pythonhosted.org/packages/ab/0c/38914f56a95aa6ec911bb7cee617762d93aaf5a11efecadbb698d6b0b9a2/watchfiles-1.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889a37e2acf43c377b5124166bece139b4c731b61492ab22e64d371cce0e6e80", size = 
442404 }, + { url = "https://files.pythonhosted.org/packages/4d/8c/a95d3ba1ccfa33a43649668f699150cce1ea795e4300c33b4c3e974a444b/watchfiles-1.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca05cacf2e5c4a97d02a2878a24020daca21dbb8823b023b978210a75c79098", size = 444461 }, ] [[package]] From 7ee18c28373041f43e37fd96c54ab845ceba9595 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Tue, 7 Jan 2025 10:41:48 -0500 Subject: [PATCH 05/61] Add setup dotnet to codeql CI (#4916) Add setup dotnet to codeql CI: --- .github/workflows/codeql.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 6c2ca98ccedb..f2c3efb9bdd2 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -100,6 +100,11 @@ jobs: # to build your code. # ā„¹ļø Command-line programs to run using the OS shell. # šŸ“š See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + name: Setup .NET 8.0 + uses: actions/setup-dotnet@v4 + with: + dotnet-version: '8.0.x' - if: matrix.build-mode == 'manual' shell: bash working-directory: dotnet From bdfdc6ab5e0210eecbd36e37faf872d638222249 Mon Sep 17 00:00:00 2001 From: Griffin Bassman Date: Tue, 7 Jan 2025 10:48:37 -0500 Subject: [PATCH 06/61] fix: poe check errors to pass (#4917) Co-authored-by: Jack Gerrits --- python/packages/autogen-core/tests/test_types.py | 6 +++--- .../src/autogen_test_utils/telemetry_test_utils.py | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/python/packages/autogen-core/tests/test_types.py b/python/packages/autogen-core/tests/test_types.py index 16697e006e6e..afc64484f7f9 100644 --- a/python/packages/autogen-core/tests/test_types.py +++ b/python/packages/autogen-core/tests/test_types.py @@ -57,15 +57,15 @@ class NestedBaseModelList: @dataclass class NestedBaseModelList2: - nested: list[MyBaseModel] + 
nested: List[MyBaseModel] @dataclass class NestedBaseModelList3: - nested: list[list[MyBaseModel]] + nested: List[List[MyBaseModel]] @dataclass class NestedBaseModelList4: - nested: list[list[list[list[list[list[MyBaseModel]]]]]] + nested: List[List[List[List[List[List[MyBaseModel]]]]]] @dataclass class NestedBaseModelUnion: diff --git a/python/packages/autogen-test-utils/src/autogen_test_utils/telemetry_test_utils.py b/python/packages/autogen-test-utils/src/autogen_test_utils/telemetry_test_utils.py index 00a13a6eabd7..04ff69428653 100644 --- a/python/packages/autogen-test-utils/src/autogen_test_utils/telemetry_test_utils.py +++ b/python/packages/autogen-test-utils/src/autogen_test_utils/telemetry_test_utils.py @@ -1,6 +1,5 @@ from typing import List, Sequence -import pytest from opentelemetry.sdk.trace import ReadableSpan, TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter, SpanExportResult From 310564908b9a6fd1675ea12770872ec9bbcaa3e0 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Tue, 7 Jan 2025 12:21:50 -0500 Subject: [PATCH 07/61] fix!: Move azure auth provider to separate module (#4912) * Move azure auth provider to separate module * Update lock * fix component gen --- python/packages/autogen-ext/pyproject.toml | 2 +- .../_azure_token_provider.py => auth/azure/__init__.py} | 5 +++-- .../autogen-ext/src/autogen_ext/models/openai/__init__.py | 2 -- .../src/autogen_ext/models/openai/_openai_client.py | 7 ++++--- .../src/autogen_ext/models/openai/config/__init__.py | 4 +--- .../src/component_schema_gen/__main__.py | 3 ++- python/uv.lock | 4 ---- 7 files changed, 11 insertions(+), 16 deletions(-) rename python/packages/autogen-ext/src/autogen_ext/{models/openai/_azure_token_provider.py => auth/azure/__init__.py} (99%) diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index 156d3ff79ddd..a4e7d0d9f1e2 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ 
b/python/packages/autogen-ext/pyproject.toml @@ -25,7 +25,7 @@ m1 = "autogen_ext.teams.magentic_one_cli:main" langchain = ["langchain_core~= 0.3.3"] azure = ["azure-core", "azure-identity"] docker = ["docker~=7.0"] -openai = ["openai>=1.52.2", "aiofiles", "azure-core", "azure-identity"] +openai = ["openai>=1.52.2", "aiofiles"] file-surfer = [ "autogen-agentchat==0.4.0.dev13", "markitdown>=0.0.1a2", diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_azure_token_provider.py b/python/packages/autogen-ext/src/autogen_ext/auth/azure/__init__.py similarity index 99% rename from python/packages/autogen-ext/src/autogen_ext/models/openai/_azure_token_provider.py rename to python/packages/autogen-ext/src/autogen_ext/auth/azure/__init__.py index 2201c921751c..607a3e01c3a4 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_azure_token_provider.py +++ b/python/packages/autogen-ext/src/autogen_ext/auth/azure/__init__.py @@ -1,11 +1,12 @@ from typing import List from autogen_core import Component -from azure.core.credentials import TokenProvider -from azure.identity import DefaultAzureCredential, get_bearer_token_provider from pydantic import BaseModel from typing_extensions import Self +from azure.core.credentials import TokenProvider +from azure.identity import DefaultAzureCredential, get_bearer_token_provider + class TokenProviderConfig(BaseModel): provider_kind: str diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py index 47586d16c373..bad5690e3cd9 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py @@ -1,4 +1,3 @@ -from ._azure_token_provider import AzureTokenProvider from ._openai_client import ( AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient, @@ -10,5 +9,4 @@ "AzureOpenAIChatCompletionClient", 
"OpenAIClientConfiguration", "OpenAIChatCompletionClient", - "AzureTokenProvider", ] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index 5c55dbffcb33..3fb8710b6ac7 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -69,8 +69,6 @@ from pydantic import BaseModel from typing_extensions import Self, Unpack -from autogen_ext.models.openai._azure_token_provider import AzureTokenProvider - from . import _model_info from .config import ( AzureOpenAIClientConfiguration, @@ -1076,8 +1074,9 @@ def __setstate__(self, state: Dict[str, Any]) -> None: self._client = _azure_openai_client_from_config(state["_raw_config"]) def _to_config(self) -> AzureOpenAIClientConfigurationConfigModel: - copied_config = self._raw_config.copy() + from ...auth.azure import AzureTokenProvider + copied_config = self._raw_config.copy() if "azure_ad_token_provider" in copied_config: if not isinstance(copied_config["azure_ad_token_provider"], AzureTokenProvider): raise ValueError("azure_ad_token_provider must be a AzureTokenProvider to be component serialized") @@ -1090,6 +1089,8 @@ def _to_config(self) -> AzureOpenAIClientConfigurationConfigModel: @classmethod def _from_config(cls, config: AzureOpenAIClientConfigurationConfigModel) -> Self: + from ...auth.azure import AzureTokenProvider + copied_config = config.model_copy().model_dump(exclude_none=True) if "azure_ad_token_provider" in copied_config: copied_config["azure_ad_token_provider"] = AzureTokenProvider.load_component( diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py index b98158504a8d..482af3a6fa69 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py 
+++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py @@ -5,8 +5,6 @@ from pydantic import BaseModel from typing_extensions import Required, TypedDict -from .._azure_token_provider import AzureTokenProvider - class ResponseFormat(TypedDict): type: Literal["text", "json_object"] @@ -51,7 +49,7 @@ class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False) azure_deployment: str api_version: Required[str] azure_ad_token: str - azure_ad_token_provider: AsyncAzureADTokenProvider | AzureTokenProvider + azure_ad_token_provider: AsyncAzureADTokenProvider # Or AzureTokenProvider __all__ = [ diff --git a/python/packages/component-schema-gen/src/component_schema_gen/__main__.py b/python/packages/component-schema-gen/src/component_schema_gen/__main__.py index a61e7f38635b..bf0a21f1f141 100644 --- a/python/packages/component-schema-gen/src/component_schema_gen/__main__.py +++ b/python/packages/component-schema-gen/src/component_schema_gen/__main__.py @@ -8,7 +8,8 @@ ComponentConfigImpl, _type_to_provider_str, # type: ignore ) -from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, AzureTokenProvider, OpenAIChatCompletionClient +from autogen_ext.auth.azure import AzureTokenProvider +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient from pydantic import BaseModel all_defs: Dict[str, Any] = {} diff --git a/python/uv.lock b/python/uv.lock index 5df8962713cb..1e4c1c576036 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -497,8 +497,6 @@ magentic-one = [ ] openai = [ { name = "aiofiles" }, - { name = "azure-core" }, - { name = "azure-identity" }, { name = "openai" }, ] video-surfer = [ @@ -529,9 +527,7 @@ requires-dist = [ { name = "autogen-agentchat", marker = "extra == 'web-surfer'", editable = "packages/autogen-agentchat" }, { name = "autogen-core", editable = "packages/autogen-core" }, { name = "azure-core", marker = "extra == 'azure'" }, - { name = "azure-core", 
marker = "extra == 'openai'" }, { name = "azure-identity", marker = "extra == 'azure'" }, - { name = "azure-identity", marker = "extra == 'openai'" }, { name = "docker", marker = "extra == 'docker'", specifier = "~=7.0" }, { name = "ffmpeg-python", marker = "extra == 'video-surfer'" }, { name = "grpcio", marker = "extra == 'grpc'", specifier = "~=1.62.0" }, From f4382f01c82a3b9e3359d88d74379c3ce6478e0d Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Tue, 7 Jan 2025 12:32:29 -0500 Subject: [PATCH 08/61] Log messages and response for LLMCall event (#4910) * Log messages and response for LLMCall event * Remove accidental change * newline --- .../autogen-core/src/autogen_core/logging.py | 20 +++++++++++++-- .../models/openai/_openai_client.py | 25 +++++++++++++------ 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/python/packages/autogen-core/src/autogen_core/logging.py b/python/packages/autogen-core/src/autogen_core/logging.py index 11bb46c04d6e..057458e18c44 100644 --- a/python/packages/autogen-core/src/autogen_core/logging.py +++ b/python/packages/autogen-core/src/autogen_core/logging.py @@ -1,18 +1,30 @@ import json from enum import Enum -from typing import Any, cast +from typing import Any, Dict, cast from ._agent_id import AgentId from ._topic import TopicId class LLMCallEvent: - def __init__(self, *, prompt_tokens: int, completion_tokens: int, **kwargs: Any) -> None: + def __init__( + self, + *, + messages: Dict[str, Any], + response: Dict[str, Any], + prompt_tokens: int, + completion_tokens: int, + agent_id: AgentId | None = None, + **kwargs: Any, + ) -> None: """To be used by model clients to log the call to the LLM. Args: + messages (Dict[str, Any]): The messages of the call. Must be json serializable. + response (Dict[str, Any]): The response of the call. Must be json serializable. prompt_tokens (int): Number of tokens used in the prompt. completion_tokens (int): Number of tokens used in the completion. 
+ agent_id (AgentId | None, optional): The agent id of the model. Defaults to None. Example: @@ -26,8 +38,12 @@ def __init__(self, *, prompt_tokens: int, completion_tokens: int, **kwargs: Any) """ self.kwargs = kwargs + self.kwargs["type"] = "LLMCall" + self.kwargs["messages"] = messages + self.kwargs["response"] = response self.kwargs["prompt_tokens"] = prompt_tokens self.kwargs["completion_tokens"] = completion_tokens + self.kwargs["agent_id"] = None if agent_id is None else str(agent_id) self.kwargs["type"] = "LLMCall" @property diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index 3fb8710b6ac7..e3117a5d9e84 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -28,6 +28,7 @@ Component, FunctionCall, Image, + MessageHandlerContext, ) from autogen_core.logging import LLMCallEvent from autogen_core.models import ( @@ -493,20 +494,28 @@ async def create( if use_beta_client: result = cast(ParsedChatCompletion[Any], result) - if result.usage is not None: - logger.info( - LLMCallEvent( - prompt_tokens=result.usage.prompt_tokens, - completion_tokens=result.usage.completion_tokens, - ) - ) - usage = RequestUsage( # TODO backup token counting prompt_tokens=result.usage.prompt_tokens if result.usage is not None else 0, completion_tokens=(result.usage.completion_tokens if result.usage is not None else 0), ) + # If we are running in the context of a handler we can get the agent_id + try: + agent_id = MessageHandlerContext.agent_id() + except RuntimeError: + agent_id = None + + logger.info( + LLMCallEvent( + messages=cast(Dict[str, Any], oai_messages), + response=result.model_dump(), + prompt_tokens=usage.prompt_tokens, + completion_tokens=usage.completion_tokens, + agent_id=agent_id, + ) + ) + if self._resolved_model is not None: if 
self._resolved_model != result.model: warnings.warn( From 5b9be79fba6f3c4c3f07dbd35c2c30e338244a1b Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Tue, 7 Jan 2025 12:51:35 -0500 Subject: [PATCH 09/61] feat!: Add message context to signature of intervention handler, add more to docs (#4882) * Add message context to signature of intervention handler, add more to docs * example * Add to test * Fix pyright * mypy --- .../termination-with-intervention.ipynb | 5 +- .../cookbook/tool-use-with-intervention.ipynb | 4 +- .../samples/slow_human_in_loop.py | 5 +- .../src/autogen_core/_intervention.py | 58 ++++++++++++++++--- .../_single_threaded_agent_runtime.py | 20 ++++++- .../autogen-core/tests/test_intervention.py | 46 ++++++++++++--- 6 files changed, 113 insertions(+), 25 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/termination-with-intervention.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/termination-with-intervention.ipynb index 4bed96d6b178..554dbf0bfef8 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/termination-with-intervention.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/termination-with-intervention.ipynb @@ -23,7 +23,6 @@ "from typing import Any\n", "\n", "from autogen_core import (\n", - " AgentId,\n", " DefaultInterventionHandler,\n", " DefaultTopicId,\n", " MessageContext,\n", @@ -100,7 +99,7 @@ " def __init__(self) -> None:\n", " self._termination_value: Termination | None = None\n", "\n", - " async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any:\n", + " async def on_publish(self, message: Any, *, message_context: MessageContext) -> Any:\n", " if isinstance(message, Termination):\n", " self._termination_value = message\n", " return message\n", @@ -171,7 +170,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + 
"version": "3.12.5" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb index 37def894b190..2006b2a86656 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb @@ -131,7 +131,9 @@ "outputs": [], "source": [ "class ToolInterventionHandler(DefaultInterventionHandler):\n", - " async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]:\n", + " async def on_send(\n", + " self, message: Any, *, message_context: MessageContext, recipient: AgentId\n", + " ) -> Any | type[DropMessage]:\n", " if isinstance(message, FunctionCall):\n", " # Request user prompt for tool execution.\n", " user_input = input(\n", diff --git a/python/packages/autogen-core/samples/slow_human_in_loop.py b/python/packages/autogen-core/samples/slow_human_in_loop.py index eb4e627bbf2f..8762b588a3bf 100644 --- a/python/packages/autogen-core/samples/slow_human_in_loop.py +++ b/python/packages/autogen-core/samples/slow_human_in_loop.py @@ -31,7 +31,6 @@ from typing import Any, Mapping, Optional from autogen_core import ( - AgentId, CancellationToken, DefaultInterventionHandler, DefaultTopicId, @@ -211,7 +210,7 @@ class NeedsUserInputHandler(DefaultInterventionHandler): def __init__(self): self.question_for_user: GetSlowUserMessage | None = None - async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any: + async def on_publish(self, message: Any, *, message_context: MessageContext) -> Any: if isinstance(message, GetSlowUserMessage): self.question_for_user = message return message @@ -231,7 +230,7 @@ class TerminationHandler(DefaultInterventionHandler): def 
__init__(self): self.terminateMessage: TerminateMessage | None = None - async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any: + async def on_publish(self, message: Any, *, message_context: MessageContext) -> Any: if isinstance(message, TerminateMessage): self.terminateMessage = message return message diff --git a/python/packages/autogen-core/src/autogen_core/_intervention.py b/python/packages/autogen-core/src/autogen_core/_intervention.py index 649026fea2d3..752c831b8583 100644 --- a/python/packages/autogen-core/src/autogen_core/_intervention.py +++ b/python/packages/autogen-core/src/autogen_core/_intervention.py @@ -1,6 +1,7 @@ from typing import Any, Protocol, final from ._agent_id import AgentId +from ._message_context import MessageContext __all__ = [ "DropMessage", @@ -10,20 +11,59 @@ @final -class DropMessage: ... +class DropMessage: + """Marker type for signalling that a message should be dropped by an intervention handler. The type itself should be returned from the handler.""" + + ... class InterventionHandler(Protocol): """An intervention handler is a class that can be used to modify, log or drop messages that are being processed by the :class:`autogen_core.base.AgentRuntime`. + The handler is called when the message is submitted to the runtime. + + Currently the only runtime which supports this is the :class:`autogen_core.base.SingleThreadedAgentRuntime`. + Note: Returning None from any of the intervention handler methods will result in a warning being issued and treated as "no change". If you intend to drop a message, you should return :class:`DropMessage` explicitly. + + Example: + + .. 
code-block:: python + + from autogen_core import DefaultInterventionHandler, MessageContext, AgentId, SingleThreadedAgentRuntime + from dataclasses import dataclass + from typing import Any + + + @dataclass + class MyMessage: + content: str + + + class MyInterventionHandler(DefaultInterventionHandler): + async def on_send(self, message: Any, *, message_context: MessageContext, recipient: AgentId) -> MyMessage: + if isinstance(message, MyMessage): + message.content = message.content.upper() + return message + + + runtime = SingleThreadedAgentRuntime(intervention_handlers=[MyInterventionHandler()]) + """ - async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]: ... - async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any | type[DropMessage]: ... - async def on_response( - self, message: Any, *, sender: AgentId, recipient: AgentId | None - ) -> Any | type[DropMessage]: ... + async def on_send( + self, message: Any, *, message_context: MessageContext, recipient: AgentId + ) -> Any | type[DropMessage]: + """Called when a message is submitted to the AgentRuntime using :meth:`autogen_core.base.AgentRuntime.send_message`.""" + ... + + async def on_publish(self, message: Any, *, message_context: MessageContext) -> Any | type[DropMessage]: + """Called when a message is published to the AgentRuntime using :meth:`autogen_core.base.AgentRuntime.publish_message`.""" + ... + + async def on_response(self, message: Any, *, sender: AgentId, recipient: AgentId | None) -> Any | type[DropMessage]: + """Called when a response is received by the AgentRuntime from an Agent's message handler returning a value.""" + ... class DefaultInterventionHandler(InterventionHandler): @@ -31,10 +71,12 @@ class DefaultInterventionHandler(InterventionHandler): handler methods, that simply returns the message unchanged. 
Allows for easy subclassing to override only the desired methods.""" - async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]: + async def on_send( + self, message: Any, *, message_context: MessageContext, recipient: AgentId + ) -> Any | type[DropMessage]: return message - async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any | type[DropMessage]: + async def on_publish(self, message: Any, *, message_context: MessageContext) -> Any | type[DropMessage]: return message async def on_response(self, message: Any, *, sender: AgentId, recipient: AgentId | None) -> Any | type[DropMessage]: diff --git a/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py index 9c292b9f2387..f8f3669213e4 100644 --- a/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py @@ -474,7 +474,16 @@ async def _process_next(self) -> None: "intercept", handler.__class__.__name__, parent=message_envelope.metadata ): try: - temp_message = await handler.on_send(message, sender=sender, recipient=recipient) + message_context = MessageContext( + sender=sender, + topic_id=None, + is_rpc=True, + cancellation_token=message_envelope.cancellation_token, + message_id=message_envelope.message_id, + ) + temp_message = await handler.on_send( + message, message_context=message_context, recipient=recipient + ) _warn_if_none(temp_message, "on_send") except BaseException as e: future.set_exception(e) @@ -506,7 +515,14 @@ async def _process_next(self) -> None: "intercept", handler.__class__.__name__, parent=message_envelope.metadata ): try: - temp_message = await handler.on_publish(message, sender=sender) + message_context = MessageContext( + sender=sender, + topic_id=topic_id, + is_rpc=False, + 
cancellation_token=message_envelope.cancellation_token, + message_id=message_envelope.message_id, + ) + temp_message = await handler.on_publish(message, message_context=message_context) _warn_if_none(temp_message, "on_publish") except BaseException as e: # TODO: we should raise the intervention exception to the publisher. diff --git a/python/packages/autogen-core/tests/test_intervention.py b/python/packages/autogen-core/tests/test_intervention.py index ef6ee4ebfb70..fdd5654fff11 100644 --- a/python/packages/autogen-core/tests/test_intervention.py +++ b/python/packages/autogen-core/tests/test_intervention.py @@ -1,5 +1,15 @@ +from typing import Any + import pytest -from autogen_core import AgentId, DefaultInterventionHandler, DropMessage, SingleThreadedAgentRuntime +from autogen_core import ( + AgentId, + DefaultInterventionHandler, + DefaultSubscription, + DefaultTopicId, + DropMessage, + MessageContext, + SingleThreadedAgentRuntime, +) from autogen_core.exceptions import MessageDroppedException from autogen_test_utils import LoopbackAgent, MessageType @@ -8,10 +18,20 @@ async def test_intervention_count_messages() -> None: class DebugInterventionHandler(DefaultInterventionHandler): def __init__(self) -> None: - self.num_messages = 0 + self.num_send_messages = 0 + self.num_publish_messages = 0 + self.num_response_messages = 0 + + async def on_send(self, message: Any, *, message_context: MessageContext, recipient: AgentId) -> Any: + self.num_send_messages += 1 + return message + + async def on_publish(self, message: Any, *, message_context: MessageContext) -> Any: + self.num_publish_messages += 1 + return message - async def on_send(self, message: MessageType, *, sender: AgentId | None, recipient: AgentId) -> MessageType: - self.num_messages += 1 + async def on_response(self, message: Any, *, sender: AgentId, recipient: AgentId | None) -> Any: + self.num_response_messages += 1 return message handler = DebugInterventionHandler() @@ -22,18 +42,28 @@ async def 
on_send(self, message: MessageType, *, sender: AgentId | None, recipie _response = await runtime.send_message(MessageType(), recipient=loopback) - await runtime.stop() + await runtime.stop_when_idle() - assert handler.num_messages == 1 + assert handler.num_send_messages == 1 + assert handler.num_response_messages == 1 loopback_agent = await runtime.try_get_underlying_agent_instance(loopback, type=LoopbackAgent) assert loopback_agent.num_calls == 1 + runtime.start() + await runtime.add_subscription(DefaultSubscription(agent_type="name")) + + await runtime.publish_message(MessageType(), topic_id=DefaultTopicId()) + + await runtime.stop_when_idle() + assert loopback_agent.num_calls == 2 + assert handler.num_publish_messages == 1 + @pytest.mark.asyncio async def test_intervention_drop_send() -> None: class DropSendInterventionHandler(DefaultInterventionHandler): async def on_send( - self, message: MessageType, *, sender: AgentId | None, recipient: AgentId + self, message: MessageType, *, message_context: MessageContext, recipient: AgentId ) -> MessageType | type[DropMessage]: return DropMessage @@ -81,7 +111,7 @@ class InterventionException(Exception): class ExceptionInterventionHandler(DefaultInterventionHandler): # type: ignore async def on_send( - self, message: MessageType, *, sender: AgentId | None, recipient: AgentId + self, message: MessageType, *, message_context: MessageContext, recipient: AgentId ) -> MessageType | type[DropMessage]: # type: ignore raise InterventionException From 725d573d5b01efe430e51b6d9a7e7f901b895de3 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 7 Jan 2025 09:57:23 -0800 Subject: [PATCH 10/61] Update tutorial content; move selector group chat and swarm outside of tutorial. (#4915) * Update tutorial content; move selector group chat and swarm outside of tutorial. 
* Add redirect --------- Co-authored-by: Jack Gerrits --- python/packages/autogen-core/docs/src/conf.py | 6 +++ .../user-guide/agentchat-user-guide/index.md | 37 ++++++++++---- .../agentchat-user-guide/migration-guide.md | 7 ++- .../{tutorial => }/selector-group-chat.ipynb | 4 +- .../{tutorial => }/selector-group-chat.svg | 0 .../{tutorial => }/swarm.ipynb | 2 +- .../{tutorial => }/swarm_customer_support.svg | 0 .../{tutorial => }/swarm_stock_research.svg | 0 .../tutorial/agents.ipynb | 50 ++++--------------- .../agentchat-user-guide/tutorial/teams.ipynb | 12 ++++- python/packages/autogen-core/pyproject.toml | 1 + python/uv.lock | 14 ++++++ 12 files changed, 75 insertions(+), 58 deletions(-) rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{tutorial => }/selector-group-chat.ipynb (99%) rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{tutorial => }/selector-group-chat.svg (100%) rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{tutorial => }/swarm.ipynb (99%) rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{tutorial => }/swarm_customer_support.svg (100%) rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{tutorial => }/swarm_stock_research.svg (100%) diff --git a/python/packages/autogen-core/docs/src/conf.py b/python/packages/autogen-core/docs/src/conf.py index ce18be3febae..691a528e8a42 100644 --- a/python/packages/autogen-core/docs/src/conf.py +++ b/python/packages/autogen-core/docs/src/conf.py @@ -33,6 +33,7 @@ "sphinx.ext.viewcode", "sphinx.ext.intersphinx", "sphinx.ext.graphviz", + "sphinxext.rediraffe", "sphinx_design", "sphinx_copybutton", "_extension.gallery_directive", @@ -165,6 +166,11 @@ ('code_lint', 'text/plain', 100) ] +rediraffe_redirects = { + "user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb": "user-guide/agentchat-user-guide/selector-group-chat.ipynb", + 
"user-guide/agentchat-user-guide/tutorial/swarm.ipynb": "user-guide/agentchat-user-guide/swarm.ipynb", +} + def setup_to_main( app: Sphinx, pagename: str, templatename: str, context, doctree diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md index 0bab2460a3b1..46f526c6c3aa 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md @@ -31,16 +31,28 @@ How to install AgentChat Build your first agent ::: -:::{grid-item-card} {fas}`book;pst-color-primary` Magentic-One -:link: ./magentic-one.html +:::{grid-item-card} {fas}`graduation-cap;pst-color-primary` Tutorial +:link: ./tutorial/models.html -Get started with Magentic-One +Step-by-step guide to using AgentChat, learn about agents, teams, and more ::: -:::{grid-item-card} {fas}`graduation-cap;pst-color-primary` Tutorial -:link: ./tutorial/models.html +:::{grid-item-card} {fas}`book;pst-color-primary` Selector Group Chat +:link: ./selector-group-chat.html + +Multi-agent coordination through a shared context and centralized, customizable selector +::: + +:::{grid-item-card} {fas}`book;pst-color-primary` Swarm +:link: ./swarm.html + +Multi-agent coordination through a shared context and localized, tool-based selector +::: -Step-by-step guide to using AgentChat +:::{grid-item-card} {fas}`book;pst-color-primary` Magentic-One +:link: ./magentic-one.html + +Get started with Magentic-One ::: :::{grid-item-card} {fas}`code;pst-color-primary` Examples @@ -62,7 +74,6 @@ How to migrate from AutoGen 0.2.x to 0.4.x. 
installation quickstart -magentic-one migration-guide ``` @@ -76,13 +87,21 @@ tutorial/messages tutorial/agents tutorial/teams tutorial/human-in-the-loop -tutorial/selector-group-chat -tutorial/swarm tutorial/termination tutorial/custom-agents tutorial/state ``` +```{toctree} +:maxdepth: 1 +:hidden: +:caption: Advanced + +selector-group-chat +swarm +magentic-one +``` + ```{toctree} :maxdepth: 1 :hidden: diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index 9360daaff0ef..a0beff3e2f8c 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -600,7 +600,7 @@ for more details. In `v0.4`, you get a {py:class}`~autogen_agentchat.base.TaskResult` object from a `run` or `run_stream` method. The {py:class}`~autogen_agentchat.base.TaskResult` object contains the `messages` which is the message history of the chat, including both agents' private (tool calls, etc.) and public messages. - + There are some notable differences between {py:class}`~autogen_agentchat.base.TaskResult` and `ChatResult`: - The `messages` list in {py:class}`~autogen_agentchat.base.TaskResult` uses different message format than the `ChatResult.chat_history` list. @@ -610,7 +610,6 @@ There are some notable differences between {py:class}`~autogen_agentchat.base.Ta ## Conversion between v0.2 and v0.4 Messages - You can use the following conversion functions to convert between a v0.4 message in {py:attr}`autogen_agentchat.base.TaskResult.messages` and a v0.2 message in `ChatResult.chat_history`. @@ -809,7 +808,7 @@ asyncio.run(main()) ``` For LLM-based speaker selection, you can use the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` instead. 
-See [Selector Group Chat Tutorial](./tutorial/selector-group-chat.ipynb) +See [Selector Group Chat Tutorial](./selector-group-chat.ipynb) and {py:class}`~autogen_agentchat.teams.SelectorGroupChat` for more details. > **Note**: In `v0.4`, you do not need to register functions on a user proxy to use tools @@ -912,7 +911,7 @@ as the tools are directly executed within the {py:class}`~autogen_agentchat.agen which publishes the response from the tool to the group chat. So the group chat manager does not need to be involved in routing tool calls. -See [Selector Group Chat Tutorial](./tutorial/selector-group-chat.ipynb) for an example +See [Selector Group Chat Tutorial](./selector-group-chat.ipynb) for an example of using tools in a group chat. ## Group Chat with Custom Selector (Stateflow) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb similarity index 99% rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb index 7fbe5b13125b..efb7c24190fb 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb @@ -22,7 +22,7 @@ "- Customizable selection function to override the default model-based selection\n", "\n", "```{note}\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. For more control and customization, refer to the [Group Chat Pattern](../../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. 
For more control and customization, refer to the [Group Chat Pattern](../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n", "```\n", "\n", "## How Does it Work?\n", "\n", "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", "the following steps are executed:\n", "\n", - "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. You can override the model by providing a custom selection function.\n", + "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. By default, the team will not select the same speaker consecutively unless it is the only agent available. This can be changed by setting `allow_repeated_speaker=True`. You can also override the model by providing a custom selection function.\n", "2. The team prompts the selected speaker agent to provide a response, which is then **broadcasted** to all other participants.\n", "3. The termination condition is checked to determine if the conversation should end, if not, the process repeats from step 1.\n", "4. 
When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.svg similarity index 100% rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.svg diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb similarity index 99% rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb index 6dd0511089cc..99bb02710ffe 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb @@ -18,7 +18,7 @@ "```{note}\n", "{py:class}`~autogen_agentchat.teams.Swarm` is a high-level API. 
If you need more\n", "control and customization that is not supported by this API, you can take a look\n", - "at the [Handoff Pattern](../../core-user-guide/design-patterns/handoffs.ipynb)\n", + "at the [Handoff Pattern](../core-user-guide/design-patterns/handoffs.ipynb)\n", "in the Core API documentation and implement your own version of the Swarm pattern.\n", "```\n", "\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_customer_support.svg b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm_customer_support.svg similarity index 100% rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_customer_support.svg rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm_customer_support.svg diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_stock_research.svg b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm_stock_research.svg similarity index 100% rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_stock_research.svg rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm_stock_research.svg diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index dc4076f6170c..95b6834d99a6 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -14,6 +14,7 @@ "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` get a {py:class}`~autogen_agentchat.base.Response`. 
**It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n", "\n", "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n", "\n", @@ -115,7 +116,10 @@ "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", "By default, the agent will return the result of the tool call as the final response.\n", - "```" + "```\n", + "\n", + "You can also call the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method, which is a convenience method that calls {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`. \n", + "It follows the same interface as [Teams](./teams.ipynb) and returns a {py:class}`~autogen_agentchat.base.TaskResult` object." 
] }, { @@ -186,7 +190,9 @@ "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", "\n", "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", - "gather information and responded based on the search results." + "gather information and responded based on the search results.\n", + "\n", + "You can also use {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to get the same streaming behavior as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`. It follows the same interface as [Teams](./teams.ipynb)." ] }, { @@ -310,45 +316,6 @@ ")" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## User Proxy Agent\n", - "\n", - "{py:class}`~autogen_agentchat.agents.UserProxyAgent` is a built-in agent that\n", - "provides one way for a user to intervene in the process. This agent will put the team in a temporary blocking state, and thus any exceptions or runtime failures while in the blocked state will result in a deadlock. It is strongly advised that this agent be coupled with a timeout mechanic and that all errors and exceptions emanating from it are handled." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import UserProxyAgent\n", - "\n", - "\n", - "async def user_proxy_run() -> None:\n", - " user_proxy_agent = UserProxyAgent(\"user_proxy\")\n", - " response = await user_proxy_agent.on_messages(\n", - " [TextMessage(content=\"What is your name? 
\", source=\"user\")], cancellation_token=CancellationToken()\n", - " )\n", - " print(f\"Your name is {response.chat_message.content}\")\n", - "\n", - "\n", - "# Use asyncio.run(user_proxy_run()) when running in a script.\n", - "await user_proxy_run()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The User Proxy agent is ideally used for on-demand human-in-the-loop interactions for scenarios such as Just In Time approvals, human feedback, alerts, etc. For slower user interactions, consider terminating a team using a termination condition and start another one from\n", - "{py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream` with another message." - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -357,6 +324,7 @@ "\n", "The following preset agents are available:\n", "\n", + "- {py:class}`~autogen_agentchat.agents.UserProxyAgent`: An agent that takes user input returns it as responses.\n", "- {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`: An agent that can execute code.\n", "- {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent`: An agent that is backed by an OpenAI Assistant, with ability to use custom tools.\n", "- {py:class}`~autogen_ext.agents.web_surfer.MultimodalWebSurfer`: A multi-modal agent that can search the web and visit web pages for information.\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb index a4a43d980f3c..8781b798578d 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb @@ -8,7 +8,17 @@ "\n", "In this section you'll learn how to create a _multi-agent team_ (or simply team) using AutoGen. 
A team is a group of agents that work together to achieve a common goal.\n", "\n", - "We'll first show you how to create and run a team. We'll then explain how to observe the team's behavior, which is crucial for debugging and understanding the team's performance, and common operations to control the team's behavior." + "We'll first show you how to create and run a team. We'll then explain how to observe the team's behavior, which is crucial for debugging and understanding the team's performance, and common operations to control the team's behavior.\n", + "\n", + "```{note}\n", + "When to use a team? \n", + "Teams can solve more complex tasks but they also need more scaffolding to steer than single agents.\n", + "While AgentChat has made it easier for you to work with teams,\n", + "you should first try to solve your problem with a single agent, and use a team\n", + "when a single agent becomes insufficient.\n", + "Make sure you have tried giving your single agent the right tools and instructions before\n", + "moving to a team.\n", + "```" ] }, { diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index 0c06a7934571..41f36ef594a4 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -71,6 +71,7 @@ dev = [ "sphinxcontrib-apidoc", "autodoc_pydantic~=2.2", "pygments", + "sphinxext-rediraffe", "autogen_ext==0.4.0.dev13", diff --git a/python/uv.lock b/python/uv.lock index 1e4c1c576036..6aa63d6eef19 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -392,6 +392,7 @@ dev = [ { name = "sphinx-copybutton" }, { name = "sphinx-design" }, { name = "sphinxcontrib-apidoc" }, + { name = "sphinxext-rediraffe" }, { name = "tavily-python" }, { name = "textual" }, { name = "textual-dev" }, @@ -451,6 +452,7 @@ dev = [ { name = "sphinx-copybutton" }, { name = "sphinx-design" }, { name = "sphinxcontrib-apidoc" }, + { name = "sphinxext-rediraffe" }, { name = "tavily-python" }, { 
name = "textual" }, { name = "textual-dev" }, @@ -4486,6 +4488,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072 }, ] +[[package]] +name = "sphinxext-rediraffe" +version = "0.2.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/b4/e5fbb493f796430230189a1ce5f9beff1ac1b98619fc71ed35deca6059a5/sphinxext-rediraffe-0.2.7.tar.gz", hash = "sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d", size = 8735 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/4f/c8797e796199e55cf6c8979ecdf5f4b09b81e93f87b3193c759faea63263/sphinxext_rediraffe-0.2.7-py3-none-any.whl", hash = "sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c", size = 8267 }, +] + [[package]] name = "spider-client" version = "0.0.27" From d4b406b78caa4b832598085abd08ed4cdf2a814a Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Tue, 7 Jan 2025 13:30:04 -0500 Subject: [PATCH 11/61] refactor!: Reduce dependencies of core, remove image from URL (#4919) * Reduce dependencies of core * Update lock * add package to test deps * Update lock * aio console dep * Update lock --- .../packages/autogen-agentchat/pyproject.toml | 1 + python/packages/autogen-core/pyproject.toml | 16 +++----- .../autogen-core/src/autogen_core/_image.py | 41 ++++++++++++++----- .../models/openai/_openai_client.py | 3 +- python/uv.lock | 22 ++++------ 5 files changed, 48 insertions(+), 35 deletions(-) diff --git a/python/packages/autogen-agentchat/pyproject.toml b/python/packages/autogen-agentchat/pyproject.toml index b1c52a0bb4de..b8b694d9651d 100644 --- a/python/packages/autogen-agentchat/pyproject.toml +++ 
b/python/packages/autogen-agentchat/pyproject.toml @@ -16,6 +16,7 @@ classifiers = [ ] dependencies = [ "autogen-core==0.4.0.dev13", + "aioconsole>=0.8.1" ] [tool.ruff] diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index 41f36ef594a4..9c15908f3b50 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -15,24 +15,20 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "openai>=1.3", "pillow>=11.0.0", - "aioconsole>=0.8.1", - "aiohttp>=3.10.10", - "typing-extensions", + "typing-extensions>=4.0.0", "pydantic<3.0.0,>=2.10.0", "protobuf~=4.25.1", - "tiktoken>=0.8.0", - "opentelemetry-api~=1.27.0", - "asyncio_atexit", + "opentelemetry-api>=1.27.0", "jsonref~=1.1.0", ] [dependency-groups] dev = [ - "autogen_test_utils", "aiofiles", + "asyncio_atexit", + "autogen_test_utils", "azure-identity", "chess", "colorama", @@ -46,6 +42,7 @@ dev = [ "llama-index", "markdownify", "nbqa", + "opentelemetry-sdk>=1.27.0", "pip", "polars", "python-dotenv", @@ -55,12 +52,11 @@ dev = [ "textual-imageview", "textual", "types-aiofiles", + "types-docker", "types-pillow", "types-protobuf", "types-requests", - "types-docker", "wikipedia", - "opentelemetry-sdk>=1.27.0", # Documentation "myst-nb==1.1.2", diff --git a/python/packages/autogen-core/src/autogen_core/_image.py b/python/packages/autogen-core/src/autogen_core/_image.py index 1b360ca460c6..e24dfaa6bcd9 100644 --- a/python/packages/autogen-core/src/autogen_core/_image.py +++ b/python/packages/autogen-core/src/autogen_core/_image.py @@ -4,10 +4,8 @@ import re from io import BytesIO from pathlib import Path -from typing import Any, cast +from typing import Any, Dict, cast -import aiohttp -from openai.types.chat import ChatCompletionContentPartImageParam from PIL import Image as PILImage from pydantic import GetCoreSchemaHandler, ValidationInfo from pydantic_core import core_schema @@ -15,6 +13,32 @@ class 
Image: + """Represents an image. + + + Example: + + Loading an image from a URL: + + .. code-block:: python + + from autogen_core import Image + from PIL import Image as PILImage + import aiohttp + import asyncio + + + async def from_url(url: str) -> Image: + async with aiohttp.ClientSession() as session: + async with session.get(url) as response: + content = await response.read() + return Image.from_pil(PILImage.open(content)) + + + image = asyncio.run(from_url("https://example.com/image")) + + """ + def __init__(self, image: PILImage.Image): self.image: PILImage.Image = image.convert("RGB") @@ -31,13 +55,6 @@ def from_uri(cls, uri: str) -> Image: base64_data = re.sub(r"data:image/(?:png|jpeg);base64,", "", uri) return cls.from_base64(base64_data) - @classmethod - async def from_url(cls, url: str) -> Image: - async with aiohttp.ClientSession() as session: - async with session.get(url) as response: - content = await response.read() - return cls(PILImage.open(content)) - @classmethod def from_base64(cls, base64_str: str) -> Image: return cls(PILImage.open(BytesIO(base64.b64decode(base64_str)))) @@ -60,7 +77,9 @@ def _repr_html_(self) -> str: def data_uri(self) -> str: return _convert_base64_to_data_uri(self.to_base64()) - def to_openai_format(self, detail: Literal["auto", "low", "high"] = "auto") -> ChatCompletionContentPartImageParam: + # Returns openai.types.chat.ChatCompletionContentPartImageParam, which is a TypedDict + # We don't use the explicit type annotation so that we can avoid a dependency on the OpenAI Python SDK in this package. 
+ def to_openai_format(self, detail: Literal["auto", "low", "high"] = "auto") -> Dict[str, Any]: return {"type": "image_url", "image_url": {"url": self.data_uri, "detail": detail}} @classmethod diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index e3117a5d9e84..2975d19b64f6 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -51,6 +51,7 @@ from openai.types.chat import ( ChatCompletion, ChatCompletionAssistantMessageParam, + ChatCompletionContentPartImageParam, ChatCompletionContentPartParam, ChatCompletionContentPartTextParam, ChatCompletionMessageParam, @@ -154,7 +155,7 @@ def user_message_to_oai(message: UserMessage) -> ChatCompletionUserMessageParam: elif isinstance(part, Image): # TODO: support url based images # TODO: support specifying details - parts.append(part.to_openai_format()) + parts.append(cast(ChatCompletionContentPartImageParam, part.to_openai_format())) else: raise ValueError(f"Unknown content type: {part}") return ChatCompletionUserMessageParam( diff --git a/python/uv.lock b/python/uv.lock index 6aa63d6eef19..84913dfed925 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -336,33 +336,33 @@ name = "autogen-agentchat" version = "0.4.0.dev13" source = { editable = "packages/autogen-agentchat" } dependencies = [ + { name = "aioconsole" }, { name = "autogen-core" }, ] [package.metadata] -requires-dist = [{ name = "autogen-core", editable = "packages/autogen-core" }] +requires-dist = [ + { name = "aioconsole", specifier = ">=0.8.1" }, + { name = "autogen-core", editable = "packages/autogen-core" }, +] [[package]] name = "autogen-core" version = "0.4.0.dev13" source = { editable = "packages/autogen-core" } dependencies = [ - { name = "aioconsole" }, - { name = "aiohttp" }, - { name = "asyncio-atexit" }, { name = 
"jsonref" }, - { name = "openai" }, { name = "opentelemetry-api" }, { name = "pillow" }, { name = "protobuf" }, { name = "pydantic" }, - { name = "tiktoken" }, { name = "typing-extensions" }, ] [package.dev-dependencies] dev = [ { name = "aiofiles" }, + { name = "asyncio-atexit" }, { name = "autodoc-pydantic" }, { name = "autogen-ext" }, { name = "autogen-test-utils" }, @@ -407,22 +407,18 @@ dev = [ [package.metadata] requires-dist = [ - { name = "aioconsole", specifier = ">=0.8.1" }, - { name = "aiohttp", specifier = ">=3.10.10" }, - { name = "asyncio-atexit" }, { name = "jsonref", specifier = "~=1.1.0" }, - { name = "openai", specifier = ">=1.3" }, - { name = "opentelemetry-api", specifier = "~=1.27.0" }, + { name = "opentelemetry-api", specifier = ">=1.27.0" }, { name = "pillow", specifier = ">=11.0.0" }, { name = "protobuf", specifier = "~=4.25.1" }, { name = "pydantic", specifier = ">=2.10.0,<3.0.0" }, - { name = "tiktoken", specifier = ">=0.8.0" }, - { name = "typing-extensions" }, + { name = "typing-extensions", specifier = ">=4.0.0" }, ] [package.metadata.requires-dev] dev = [ { name = "aiofiles" }, + { name = "asyncio-atexit" }, { name = "autodoc-pydantic", specifier = "~=2.2" }, { name = "autogen-ext", editable = "packages/autogen-ext" }, { name = "autogen-test-utils", editable = "packages/autogen-test-utils" }, From 7641577f6bcff4f4892f071e8033c3d4ef12145a Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Tue, 7 Jan 2025 13:38:51 -0500 Subject: [PATCH 12/61] Adds 0.2 docs link to navbar (#4921) --- python/packages/autogen-core/docs/src/conf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/packages/autogen-core/docs/src/conf.py b/python/packages/autogen-core/docs/src/conf.py index 691a528e8a42..3b5181fc602c 100644 --- a/python/packages/autogen-core/docs/src/conf.py +++ b/python/packages/autogen-core/docs/src/conf.py @@ -115,7 +115,6 @@ } ], - "announcement": 'AutoGen 0.4 is a work in progress. 
Go here to find the 0.2 documentation.', "footer_start": ["copyright"], "footer_center": ["footer-middle-links"], "footer_end": ["theme-version"], @@ -127,7 +126,9 @@ "version_match": switcher_version, }, "show_version_warning_banner": True, - + "external_links": [ + {"name": "0.2 Docs", "url": "https://microsoft.github.io/autogen/0.2/"}, + ] } html_js_files = ["custom-icon.js", "override-switcher-button.js"] From 5635ea397f32f1f5e7eb2fc8e08797212bbe5328 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 7 Jan 2025 11:51:59 -0800 Subject: [PATCH 13/61] Improve language for teams note (#4925) --- .../agentchat-user-guide/tutorial/teams.ipynb | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb index 8781b798578d..ce0f39664158 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb @@ -11,13 +11,13 @@ "We'll first show you how to create and run a team. We'll then explain how to observe the team's behavior, which is crucial for debugging and understanding the team's performance, and common operations to control the team's behavior.\n", "\n", "```{note}\n", - "When to use a team? 
\n", - "Teams can solve more complex tasks but they also need more scaffolding to steer than single agents.\n", - "While AgentChat has made it easier for you to work with teams,\n", - "you should first try to solve your problem with a single agent, and use a team\n", - "when a single agent becomes insufficient.\n", - "Make sure you have tried giving your single agent the right tools and instructions before\n", - "moving to a team.\n", + "When should you use a team?\n", + "Teams are for complex tasks that require collaboration and diverse expertise.\n", + "However, they also demand more scaffolding to steer compared to single agents.\n", + "While AutoGen simplifies the process of working with teams, start with\n", + "a single agent for simpler tasks, and transition to a multi-agent team when a single agent proves inadequate.\n", + "Ensure that you have optimized your single agent with the appropriate tools\n", + "and instructions before moving to a team-based approach.\n", "```" ] }, @@ -591,7 +591,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.12.7" } }, "nbformat": 4, From 71a3b238e75280d8c453e7a93d69f1b74976faab Mon Sep 17 00:00:00 2001 From: peterychang <49209570+peterychang@users.noreply.github.com> Date: Tue, 7 Jan 2025 16:37:02 -0500 Subject: [PATCH 14/61] Activate deactivate agents (#4800) * Instantiate and call activate/deactivate on agents * autoformatting * remove activate. 
Rename deactivate to close * remove unneeded import * create close fn in runtime * change runtime close behavior * uv.lock --------- Co-authored-by: Jack Gerrits --- .../autogen_agentchat/agents/_base_chat_agent.py | 4 ++++ .../src/autogen_agentchat/base/_chat_agent.py | 4 ++++ .../autogen-core/src/autogen_core/_agent.py | 4 ++++ .../autogen-core/src/autogen_core/_base_agent.py | 3 +++ .../autogen_core/_single_threaded_agent_runtime.py | 14 ++++++++++++++ python/packages/autogen-core/tests/test_runtime.py | 14 ++++++++++++++ .../autogen_ext/runtimes/grpc/_worker_runtime.py | 1 + 7 files changed, 44 insertions(+) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py index abcb64eff260..42b7cb78a007 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py @@ -190,3 +190,7 @@ async def save_state(self) -> Mapping[str, Any]: async def load_state(self, state: Mapping[str, Any]) -> None: """Restore agent from saved state. Default implementation for stateless agents.""" BaseState.model_validate(state) + + async def close(self) -> None: + """Called when the runtime is closed""" + pass diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py index c4ea0218916c..256f752bfa80 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py @@ -64,3 +64,7 @@ async def save_state(self) -> Mapping[str, Any]: async def load_state(self, state: Mapping[str, Any]) -> None: """Restore agent from saved state""" ... 
+ + async def close(self) -> None: + """Called when the runtime is stopped or any stop method is called""" + ... diff --git a/python/packages/autogen-core/src/autogen_core/_agent.py b/python/packages/autogen-core/src/autogen_core/_agent.py index edb5e59b1ce3..0f37b822ff8a 100644 --- a/python/packages/autogen-core/src/autogen_core/_agent.py +++ b/python/packages/autogen-core/src/autogen_core/_agent.py @@ -45,3 +45,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: """ ... + + async def close(self) -> None: + """Called when the runtime is closed""" + ... diff --git a/python/packages/autogen-core/src/autogen_core/_base_agent.py b/python/packages/autogen-core/src/autogen_core/_base_agent.py index cfefb4ab72f8..bffb61b876bb 100644 --- a/python/packages/autogen-core/src/autogen_core/_base_agent.py +++ b/python/packages/autogen-core/src/autogen_core/_base_agent.py @@ -152,6 +152,9 @@ async def load_state(self, state: Mapping[str, Any]) -> None: warnings.warn("load_state not implemented", stacklevel=2) pass + async def close(self) -> None: + pass + @classmethod async def register( cls, diff --git a/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py index f8f3669213e4..d682c1c7beb0 100644 --- a/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/_single_threaded_agent_runtime.py @@ -309,6 +309,7 @@ async def _process_send(self, message_envelope: SendMessageEnvelope) -> None: ) ) recipient_agent = await self._get_agent(recipient) + message_context = MessageContext( sender=message_envelope.sender, topic_id=None, @@ -589,10 +590,21 @@ def start(self) -> None: raise RuntimeError("Runtime is already started") self._run_context = RunContext(self) + async def close(self) -> None: + """Calls :meth:`stop` if applicable and the :meth:`Agent.close` method on all instantiated 
agents""" + # stop the runtime if it hasn't been stopped yet + if self._run_context is not None: + await self.stop() + # close all the agents that have been instantiated + for agent_id in self._instantiated_agents: + agent = await self._get_agent(agent_id) + await agent.close() + async def stop(self) -> None: """Immediately stop the runtime message processing loop. The currently processing message will be completed, but all others following it will be discarded.""" if self._run_context is None: raise RuntimeError("Runtime is not started") + await self._run_context.stop() self._run_context = None self._message_queue = Queue() @@ -603,6 +615,7 @@ async def stop_when_idle(self) -> None: if self._run_context is None: raise RuntimeError("Runtime is not started") await self._run_context.stop_when_idle() + self._run_context = None self._message_queue = Queue() @@ -623,6 +636,7 @@ async def stop_when(self, condition: Callable[[], bool]) -> None: if self._run_context is None: raise RuntimeError("Runtime is not started") await self._run_context.stop_when(condition) + self._run_context = None self._message_queue = Queue() diff --git a/python/packages/autogen-core/tests/test_runtime.py b/python/packages/autogen-core/tests/test_runtime.py index 16de5ccc18f6..57cef4ec4810 100644 --- a/python/packages/autogen-core/tests/test_runtime.py +++ b/python/packages/autogen-core/tests/test_runtime.py @@ -86,6 +86,8 @@ async def test_register_receives_publish(tracer_provider: TracerProvider) -> Non "autogen publish default.(default)-T", ] + await runtime.close() + @pytest.mark.asyncio async def test_register_receives_publish_with_construction(caplog: pytest.LogCaptureFixture) -> None: @@ -107,6 +109,8 @@ async def agent_factory() -> LoopbackAgent: # Check if logger has the exception. 
assert any("Error constructing agent" in e.message for e in caplog.records) + await runtime.close() + @pytest.mark.asyncio async def test_register_receives_publish_cascade() -> None: @@ -137,6 +141,8 @@ async def test_register_receives_publish_cascade() -> None: agent = await runtime.try_get_underlying_agent_instance(AgentId(f"name{i}", "default"), CascadingAgent) assert agent.num_calls == total_num_calls_expected + await runtime.close() + @pytest.mark.asyncio async def test_register_factory_explicit_name() -> None: @@ -162,6 +168,8 @@ async def test_register_factory_explicit_name() -> None: ) assert other_long_running_agent.num_calls == 0 + await runtime.close() + @pytest.mark.asyncio async def test_default_subscription() -> None: @@ -185,6 +193,8 @@ async def test_default_subscription() -> None: ) assert other_long_running_agent.num_calls == 0 + await runtime.close() + @pytest.mark.asyncio async def test_type_subscription() -> None: @@ -208,6 +218,8 @@ class LoopbackAgentWithSubscription(LoopbackAgent): ... 
) assert other_long_running_agent.num_calls == 0 + await runtime.close() + @pytest.mark.asyncio async def test_default_subscription_publish_to_other_source() -> None: @@ -229,3 +241,5 @@ async def test_default_subscription_publish_to_other_source() -> None: AgentId("name", key="other"), type=LoopbackAgentWithDefaultSubscription ) assert other_long_running_agent.num_calls == 1 + + await runtime.close() diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py index b39ec04a3e82..4ae66e44ccf6 100644 --- a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py @@ -179,6 +179,7 @@ async def recv(self) -> agent_worker_pb2.Message: class GrpcWorkerAgentRuntime(AgentRuntime): + # TODO: Needs to handle agent close() call def __init__( self, host_address: str, From f113c9a959c32b303a4dadccab9bfd59f3f2839b Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 7 Jan 2025 15:31:29 -0800 Subject: [PATCH 15/61] Move core samples to /python/samples (#4911) * Move core samples to /python/samples * Fix proto check * Add sample code check workflow * Update pyright settings; fix types --- .github/workflows/checks.yml | 18 + .../core-user-guide/framework/tools.ipynb | 16 +- .../autogen-core/samples/common/__init__.py | 0 .../samples/common/agents/__init__.py | 5 - .../common/agents/_chat_completion_agent.py | 263 -------------- .../samples/common/patterns/__init__.py | 3 - .../common/patterns/_group_chat_manager.py | 154 -------- .../common/patterns/_group_chat_utils.py | 88 ----- .../autogen-core/samples/common/types.py | 75 ---- python/pyproject.toml | 12 +- python/samples/agentchat_chainlit/app.py | 23 +- .../core_async_human_in_the_loop/.gitignore | 1 + .../core_async_human_in_the_loop}/README.md | 14 +- .../core_async_human_in_the_loop/main.py} | 24 +- 
.../model_config_template.json | 38 ++ .../core_async_human_in_the_loop/utils.py | 47 +++ python/samples/core_chess_game/.gitignore | 1 + python/samples/core_chess_game/README.md | 23 ++ .../core_chess_game/main.py} | 159 +++++---- .../model_config_template.json | 38 ++ .../core_chess_game}/utils.py | 47 +-- .../core_distributed-group-chat}/.gitignore | 0 .../core_distributed-group-chat}/README.md | 0 .../core_distributed-group-chat}/_agents.py | 0 .../core_distributed-group-chat}/_types.py | 0 .../core_distributed-group-chat}/_utils.py | 0 .../core_distributed-group-chat}/config.yaml | 0 .../public/avatars/editor.png | 0 .../public/avatars/group_chat_manager.png | 0 .../public/avatars/user.png | 0 .../public/avatars/writer.png | 0 .../public/favicon.png | 0 .../public/logo.png | 0 .../core_distributed-group-chat}/run.sh | 0 .../run_editor_agent.py | 0 .../run_group_chat_manager.py | 0 .../core_distributed-group-chat}/run_host.py | 0 .../core_distributed-group-chat}/run_ui.py | 0 .../run_writer_agent.py | 0 .../core_grpc_worker_runtime}/agents.py | 0 .../run_cascading_publisher.py | 0 .../run_cascading_worker.py | 0 .../core_grpc_worker_runtime}/run_host.py | 0 .../run_worker_pub_sub.py | 0 .../run_worker_rpc.py | 0 .../core_semantic_router}/README.md | 0 .../core_semantic_router}/_agents.py | 0 .../_semantic_router_agent.py | 0 .../_semantic_router_components.py | 0 .../core_semantic_router}/run_host.py | 0 .../run_semantic_router.py | 0 .../core_xlang_hello_python_agent}/README.md | 0 .../hello_python_agent.py | 0 .../protos/__init__.py | 0 .../protos/agent_events_pb2.py | 0 .../protos/agent_events_pb2.pyi | 0 .../protos/agent_events_pb2_grpc.py | 0 .../protos/agent_events_pb2_grpc.pyi | 0 .../user_input.py | 0 python/uv.lock | 337 +++++++++++++++--- 60 files changed, 587 insertions(+), 799 deletions(-) delete mode 100644 python/packages/autogen-core/samples/common/__init__.py delete mode 100644 python/packages/autogen-core/samples/common/agents/__init__.py 
delete mode 100644 python/packages/autogen-core/samples/common/agents/_chat_completion_agent.py delete mode 100644 python/packages/autogen-core/samples/common/patterns/__init__.py delete mode 100644 python/packages/autogen-core/samples/common/patterns/_group_chat_manager.py delete mode 100644 python/packages/autogen-core/samples/common/patterns/_group_chat_utils.py delete mode 100644 python/packages/autogen-core/samples/common/types.py create mode 100644 python/samples/core_async_human_in_the_loop/.gitignore rename python/{packages/autogen-core/samples => samples/core_async_human_in_the_loop}/README.md (62%) rename python/{packages/autogen-core/samples/slow_human_in_loop.py => samples/core_async_human_in_the_loop/main.py} (95%) create mode 100644 python/samples/core_async_human_in_the_loop/model_config_template.json create mode 100644 python/samples/core_async_human_in_the_loop/utils.py create mode 100644 python/samples/core_chess_game/.gitignore create mode 100644 python/samples/core_chess_game/README.md rename python/{packages/autogen-core/samples/chess_game.py => samples/core_chess_game/main.py} (59%) create mode 100644 python/samples/core_chess_game/model_config_template.json rename python/{packages/autogen-core/samples/common => samples/core_chess_game}/utils.py (66%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/.gitignore (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/README.md (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/_agents.py (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/_types.py (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/_utils.py (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => 
samples/core_distributed-group-chat}/config.yaml (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/public/avatars/editor.png (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/public/avatars/group_chat_manager.png (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/public/avatars/user.png (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/public/avatars/writer.png (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/public/favicon.png (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/public/logo.png (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/run.sh (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/run_editor_agent.py (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/run_group_chat_manager.py (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/run_host.py (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/run_ui.py (100%) rename python/{packages/autogen-core/samples/distributed-group-chat => samples/core_distributed-group-chat}/run_writer_agent.py (100%) rename python/{packages/autogen-core/samples/worker => samples/core_grpc_worker_runtime}/agents.py (100%) rename python/{packages/autogen-core/samples/worker => samples/core_grpc_worker_runtime}/run_cascading_publisher.py (100%) rename python/{packages/autogen-core/samples/worker => 
samples/core_grpc_worker_runtime}/run_cascading_worker.py (100%) rename python/{packages/autogen-core/samples/worker => samples/core_grpc_worker_runtime}/run_host.py (100%) rename python/{packages/autogen-core/samples/worker => samples/core_grpc_worker_runtime}/run_worker_pub_sub.py (100%) rename python/{packages/autogen-core/samples/worker => samples/core_grpc_worker_runtime}/run_worker_rpc.py (100%) rename python/{packages/autogen-core/samples/semantic_router => samples/core_semantic_router}/README.md (100%) rename python/{packages/autogen-core/samples/semantic_router => samples/core_semantic_router}/_agents.py (100%) rename python/{packages/autogen-core/samples/semantic_router => samples/core_semantic_router}/_semantic_router_agent.py (100%) rename python/{packages/autogen-core/samples/semantic_router => samples/core_semantic_router}/_semantic_router_components.py (100%) rename python/{packages/autogen-core/samples/semantic_router => samples/core_semantic_router}/run_host.py (100%) rename python/{packages/autogen-core/samples/semantic_router => samples/core_semantic_router}/run_semantic_router.py (100%) rename python/{packages/autogen-core/samples/xlang/hello_python_agent => samples/core_xlang_hello_python_agent}/README.md (100%) rename python/{packages/autogen-core/samples/xlang/hello_python_agent => samples/core_xlang_hello_python_agent}/hello_python_agent.py (100%) rename python/{packages/autogen-core/samples => samples/core_xlang_hello_python_agent}/protos/__init__.py (100%) rename python/{packages/autogen-core/samples => samples/core_xlang_hello_python_agent}/protos/agent_events_pb2.py (100%) rename python/{packages/autogen-core/samples => samples/core_xlang_hello_python_agent}/protos/agent_events_pb2.pyi (100%) rename python/{packages/autogen-core/samples => samples/core_xlang_hello_python_agent}/protos/agent_events_pb2_grpc.py (100%) rename python/{packages/autogen-core/samples => samples/core_xlang_hello_python_agent}/protos/agent_events_pb2_grpc.pyi 
(100%) rename python/{packages/autogen-core/samples/xlang/hello_python_agent => samples/core_xlang_hello_python_agent}/user_input.py (100%) diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 982b040120be..347e3f7042f0 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -214,6 +214,24 @@ jobs: poe --directory ${{ matrix.package }} docs-check-examples working-directory: ./python + samples-code-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + enable-cache: true + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - run: uv sync --locked --all-extras + working-directory: ./python + - name: Run task + run: | + source ${{ github.workspace }}/python/.venv/bin/activate + poe samples-code-check + working-directory: ./python + markdown-code-lint: runs-on: ubuntu-latest steps: diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb index f2bf29425c2e..23fe4dffbb4e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb @@ -59,13 +59,6 @@ "print(code_execution_tool.return_value_as_string(result))" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "markdown", "metadata": {}, @@ -106,7 +99,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "80.44429939059668\n" + "34.26925801998722\n" ] } ], @@ -148,7 +141,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -157,7 +150,6 @@ "\n", "from autogen_core import (\n", " AgentId,\n", - " AgentInstantiationContext,\n", " MessageContext,\n", " RoutedAgent,\n", " 
SingleThreadedAgentRuntime,\n", @@ -281,7 +273,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "The stock price of NVDA (NVIDIA Corporation) on June 1, 2024, was approximately $179.46.\n" + "The stock price of NVDA (NVIDIA Corporation) on June 1, 2024, was approximately $27.41.\n" ] } ], @@ -299,7 +291,7 @@ ], "metadata": { "kernelspec": { - "display_name": "autogen_core", + "display_name": ".venv", "language": "python", "name": "python3" }, diff --git a/python/packages/autogen-core/samples/common/__init__.py b/python/packages/autogen-core/samples/common/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/python/packages/autogen-core/samples/common/agents/__init__.py b/python/packages/autogen-core/samples/common/agents/__init__.py deleted file mode 100644 index fb5dcdb8e4e3..000000000000 --- a/python/packages/autogen-core/samples/common/agents/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from ._chat_completion_agent import ChatCompletionAgent - -__all__ = [ - "ChatCompletionAgent", -] diff --git a/python/packages/autogen-core/samples/common/agents/_chat_completion_agent.py b/python/packages/autogen-core/samples/common/agents/_chat_completion_agent.py deleted file mode 100644 index 538175ef4ce3..000000000000 --- a/python/packages/autogen-core/samples/common/agents/_chat_completion_agent.py +++ /dev/null @@ -1,263 +0,0 @@ -import asyncio -import json -from typing import Any, Coroutine, Dict, List, Mapping, Sequence, Tuple - -from autogen_core import ( - AgentId, - CancellationToken, - DefaultTopicId, - FunctionCall, - MessageContext, - RoutedAgent, - message_handler, -) -from autogen_core.model_context import ChatCompletionContext -from autogen_core.models import ( - AssistantMessage, - ChatCompletionClient, - FunctionExecutionResult, - FunctionExecutionResultMessage, - SystemMessage, - UserMessage, -) -from autogen_core.tools import Tool - -from ..types import ( - FunctionCallMessage, - Message, - MultiModalMessage, - 
PublishNow, - Reset, - RespondNow, - ResponseFormat, - TextMessage, - ToolApprovalRequest, - ToolApprovalResponse, -) - - -class ChatCompletionAgent(RoutedAgent): - """An agent implementation that uses the ChatCompletion API to gnenerate - responses and execute tools. - - Args: - description (str): The description of the agent. - system_messages (List[SystemMessage]): The system messages to use for - the ChatCompletion API. - model_context (ChatCompletionContext): The context manager for storing - and retrieving ChatCompletion messages. - model_client (ChatCompletionClient): The client to use for the - ChatCompletion API. - tools (Sequence[Tool], optional): The tools used by the agent. Defaults - to []. If no tools are provided, the agent cannot handle tool calls. - If tools are provided, and the response from the model is a list of - tool calls, the agent will call itselfs with the tool calls until it - gets a response that is not a list of tool calls, and then use that - response as the final response. - tool_approver (Agent | None, optional): The agent that approves tool - calls. Defaults to None. If no tool approver is provided, the agent - will execute the tools without approval. If a tool approver is - provided, the agent will send a request to the tool approver before - executing the tools. - """ - - def __init__( - self, - description: str, - system_messages: List[SystemMessage], - model_context: ChatCompletionContext, - model_client: ChatCompletionClient, - tools: Sequence[Tool] = [], - tool_approver: AgentId | None = None, - ) -> None: - super().__init__(description) - self._description = description - self._system_messages = system_messages - self._client = model_client - self._model_context = model_context - self._tools = tools - self._tool_approver = tool_approver - - @message_handler() - async def on_text_message(self, message: TextMessage, ctx: MessageContext) -> None: - """Handle a text message. 
This method adds the message to the memory and - does not generate any message.""" - # Add a user message. - await self._model_context.add_message(UserMessage(content=message.content, source=message.source)) - - @message_handler() - async def on_multi_modal_message(self, message: MultiModalMessage, ctx: MessageContext) -> None: - """Handle a multimodal message. This method adds the message to the memory - and does not generate any message.""" - # Add a user message. - await self._model_context.add_message(UserMessage(content=message.content, source=message.source)) - - @message_handler() - async def on_reset(self, message: Reset, ctx: MessageContext) -> None: - """Handle a reset message. This method clears the memory.""" - # Reset the chat messages. - await self._model_context.clear() - - @message_handler() - async def on_respond_now(self, message: RespondNow, ctx: MessageContext) -> TextMessage | FunctionCallMessage: - """Handle a respond now message. This method generates a response and - returns it to the sender.""" - # Generate a response. - response = await self._generate_response(message.response_format, ctx) - - # Return the response. - return response - - @message_handler() - async def on_publish_now(self, message: PublishNow, ctx: MessageContext) -> None: - """Handle a publish now message. This method generates a response and - publishes it.""" - # Generate a response. - response = await self._generate_response(message.response_format, ctx) - - # Publish the response. - await self.publish_message(response, topic_id=DefaultTopicId()) - - @message_handler() - async def on_tool_call_message( - self, message: FunctionCallMessage, ctx: MessageContext - ) -> FunctionExecutionResultMessage: - """Handle a tool call message. This method executes the tools and - returns the results.""" - if len(self._tools) == 0: - raise ValueError("No tools available") - - # Execute the tool calls. 
- results: List[FunctionExecutionResult] = [] - execution_futures: List[Coroutine[Any, Any, Tuple[str, str]]] = [] - for function_call in message.content: - # Parse the arguments. - try: - arguments = json.loads(function_call.arguments) - except json.JSONDecodeError: - results.append( - FunctionExecutionResult( - content=f"Error: Could not parse arguments for function {function_call.name}.", - call_id=function_call.id, - ) - ) - continue - # Execute the function. - future = self._execute_function( - function_call.name, - arguments, - function_call.id, - cancellation_token=ctx.cancellation_token, - ) - # Append the async result. - execution_futures.append(future) - if execution_futures: - # Wait for all async results. - execution_results = await asyncio.gather(*execution_futures) - # Add the results. - for execution_result, call_id in execution_results: - results.append(FunctionExecutionResult(content=execution_result, call_id=call_id)) - - # Create a tool call result message. - tool_call_result_msg = FunctionExecutionResultMessage(content=results) - - # Return the results. - return tool_call_result_msg - - async def _generate_response( - self, - response_format: ResponseFormat, - ctx: MessageContext, - ) -> TextMessage | FunctionCallMessage: - # Get a response from the model. - response = await self._client.create( - self._system_messages + (await self._model_context.get_messages()), - tools=self._tools, - json_output=response_format == ResponseFormat.json_object, - ) - # Add the response to the chat messages context. - await self._model_context.add_message(AssistantMessage(content=response.content, source=self.metadata["type"])) - - # If the agent has function executor, and the response is a list of - # tool calls, iterate with itself until we get a response that is not a - # list of tool calls. 
- while ( - len(self._tools) > 0 - and isinstance(response.content, list) - and all(isinstance(x, FunctionCall) for x in response.content) - ): - # Send a function call message to itself. - response = await self.send_message( - message=FunctionCallMessage(content=response.content, source=self.metadata["type"]), - recipient=self.id, - cancellation_token=ctx.cancellation_token, - ) - if not isinstance(response, FunctionExecutionResultMessage): - raise RuntimeError(f"Expect FunctionExecutionResultMessage but got {response}.") - await self._model_context.add_message(response) - # Make an assistant message from the response. - response = await self._client.create( - self._system_messages + (await self._model_context.get_messages()), - tools=self._tools, - json_output=response_format == ResponseFormat.json_object, - ) - await self._model_context.add_message( - AssistantMessage(content=response.content, source=self.metadata["type"]) - ) - - final_response: Message - if isinstance(response.content, str): - # If the response is a string, return a text message. - final_response = TextMessage(content=response.content, source=self.metadata["type"]) - elif isinstance(response.content, list) and all(isinstance(x, FunctionCall) for x in response.content): - # If the response is a list of function calls, return a function call message. - final_response = FunctionCallMessage(content=response.content, source=self.metadata["type"]) - else: - raise ValueError(f"Unexpected response: {response.content}") - - return final_response - - async def _execute_function( - self, - name: str, - args: Dict[str, Any], - call_id: str, - cancellation_token: CancellationToken, - ) -> Tuple[str, str]: - # Find tool - tool = next((t for t in self._tools if t.name == name), None) - if tool is None: - return (f"Error: tool {name} not found.", call_id) - - # Check if the tool needs approval - if self._tool_approver is not None: - # Send a tool approval request. 
- approval_request = ToolApprovalRequest( - tool_call=FunctionCall(id=call_id, arguments=json.dumps(args), name=name) - ) - approval_response = await self.send_message( - message=approval_request, - recipient=self._tool_approver, - cancellation_token=cancellation_token, - ) - if not isinstance(approval_response, ToolApprovalResponse): - raise ValueError(f"Expecting {ToolApprovalResponse.__name__}, received: {type(approval_response)}") - if not approval_response.approved: - return (f"Error: tool {name} approved, reason: {approval_response.reason}", call_id) - - try: - result = await tool.run_json(args, cancellation_token) - result_as_str = tool.return_value_as_string(result) - except Exception as e: - result_as_str = f"Error: {str(e)}" - return (result_as_str, call_id) - - async def save_state(self) -> Mapping[str, Any]: - return { - "chat_history": await self._model_context.save_state(), - "system_messages": self._system_messages, - } - - async def load_state(self, state: Mapping[str, Any]) -> None: - await self._model_context.load_state(state["chat_history"]) - self._system_messages = state["system_messages"] diff --git a/python/packages/autogen-core/samples/common/patterns/__init__.py b/python/packages/autogen-core/samples/common/patterns/__init__.py deleted file mode 100644 index a667bc89e06c..000000000000 --- a/python/packages/autogen-core/samples/common/patterns/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from ._group_chat_manager import GroupChatManager - -__all__ = ["GroupChatManager"] diff --git a/python/packages/autogen-core/samples/common/patterns/_group_chat_manager.py b/python/packages/autogen-core/samples/common/patterns/_group_chat_manager.py deleted file mode 100644 index f39e354c9d48..000000000000 --- a/python/packages/autogen-core/samples/common/patterns/_group_chat_manager.py +++ /dev/null @@ -1,154 +0,0 @@ -import logging -from typing import Any, Callable, List, Mapping - -from autogen_core import AgentId, AgentProxy, MessageContext, RoutedAgent, 
message_handler -from autogen_core.model_context import ChatCompletionContext -from autogen_core.models import ChatCompletionClient, UserMessage - -from ..types import ( - MultiModalMessage, - PublishNow, - Reset, - TextMessage, -) -from ._group_chat_utils import select_speaker - -logger = logging.getLogger("autogen_core.events") - - -class GroupChatManager(RoutedAgent): - """An agent that manages a group chat through event-driven orchestration. - - Args: - name (str): The name of the agent. - description (str): The description of the agent. - runtime (AgentRuntime): The runtime to register the agent. - participants (List[AgentId]): The list of participants in the group chat. - model_context (ChatCompletionContext): The context manager for storing - and retrieving ChatCompletion messages. - model_client (ChatCompletionClient, optional): The client to use for the model. - If provided, the agent will use the model to select the next speaker. - If not provided, the agent will select the next speaker from the list of participants - according to the order given. - termination_word (str, optional): The word that terminates the group chat. Defaults to "TERMINATE". - transitions (Mapping[AgentId, List[AgentId]], optional): The transitions between agents. - Keys are the agents, and values are the list of agents that can follow the key agent. Defaults to {}. - If provided, the group chat manager will use the transitions to select the next speaker. - If a transition is not provided for an agent, the choices fallback to all participants. - If no model client is provided, a transition must have a single value. - on_message_received (Callable[[TextMessage], None], optional): A custom handler to call when a message is received. - Defaults to None. 
- """ - - def __init__( - self, - description: str, - participants: List[AgentId], - model_context: ChatCompletionContext, - model_client: ChatCompletionClient | None = None, - termination_word: str = "TERMINATE", - transitions: Mapping[AgentId, List[AgentId]] = {}, - on_message_received: Callable[[TextMessage | MultiModalMessage], None] | None = None, - ): - super().__init__(description) - self._model_context = model_context - self._client = model_client - self._participants = participants - self._participant_proxies = dict((p, AgentProxy(p, self.runtime)) for p in participants) - self._termination_word = termination_word - for key, value in transitions.items(): - if not value: - # Make sure no empty transitions are provided. - raise ValueError(f"Empty transition list provided for {key.type}.") - if key not in participants: - # Make sure all keys are in the list of participants. - raise ValueError(f"Transition key {key.type} not found in participants.") - for v in value: - if v not in participants: - # Make sure all values are in the list of participants. - raise ValueError(f"Transition value {v.type} not found in participants.") - if self._client is None: - # Make sure there is only one transition for each key if no model client is provided. - if len(value) > 1: - raise ValueError(f"Multiple transitions provided for {key.type} but no model client is provided.") - self._tranistions = transitions - self._on_message_received = on_message_received - - @message_handler() - async def on_reset(self, message: Reset, ctx: MessageContext) -> None: - """Handle a reset message. This method clears the memory.""" - await self._model_context.clear() - - @message_handler() - async def on_new_message(self, message: TextMessage | MultiModalMessage, ctx: MessageContext) -> None: - """Handle a message. 
This method adds the message to the memory, selects the next speaker, - and sends a message to the selected speaker to publish a response.""" - # Call the custom on_message_received handler if provided. - if self._on_message_received is not None: - self._on_message_received(message) - - # Check if the message contains the termination word. - if isinstance(message, TextMessage) and self._termination_word in message.content: - # Terminate the group chat by not selecting the next speaker. - return - - # Save the message to chat memory. - await self._model_context.add_message(UserMessage(content=message.content, source=message.source)) - - # Get the last speaker. - last_speaker_name = message.source - last_speaker_index = next((i for i, p in enumerate(self._participants) if p.type == last_speaker_name), None) - - # Get the candidates for the next speaker. - if last_speaker_index is not None: - logger.debug(f"Last speaker: {last_speaker_name}") - last_speaker = self._participants[last_speaker_index] - if self._tranistions.get(last_speaker) is not None: - candidates = [c for c in self._participants if c in self._tranistions[last_speaker]] - else: - candidates = self._participants - else: - candidates = self._participants - logger.debug(f"Group chat manager next speaker candidates: {[c.type for c in candidates]}") - - # Select speaker. - if len(candidates) == 0: - speaker = None - elif len(candidates) == 1: - speaker = candidates[0] - else: - # More than one candidate, select the next speaker. - if self._client is None: - # If no model client is provided, candidates must be the list of participants. - assert candidates == self._participants - # If no model client is provided, select the next speaker from the list of participants. - if last_speaker_index is not None: - next_speaker_index = (last_speaker_index + 1) % len(self._participants) - speaker = self._participants[next_speaker_index] - else: - # If no last speaker, select the first speaker. 
- speaker = candidates[0] - else: - # If a model client is provided, select the speaker based on the transitions and the model. - speaker_index = await select_speaker( - self._model_context, self._client, [self._participant_proxies[c] for c in candidates] - ) - speaker = candidates[speaker_index] - - logger.debug(f"Group chat manager selected speaker: {speaker.type if speaker is not None else None}") - - if speaker is not None: - # Send the message to the selected speaker to ask it to publish a response. - await self.send_message(PublishNow(), speaker) - - async def save_state(self) -> Mapping[str, Any]: - return { - "chat_history": await self._model_context.save_state(), - "termination_word": self._termination_word, - } - - async def load_state(self, state: Mapping[str, Any]) -> None: - # Load the chat history. - await self._model_context.load_state(state["chat_history"]) - # Load the termination word. - self._termination_word = state["termination_word"] diff --git a/python/packages/autogen-core/samples/common/patterns/_group_chat_utils.py b/python/packages/autogen-core/samples/common/patterns/_group_chat_utils.py deleted file mode 100644 index 813bc1747bc2..000000000000 --- a/python/packages/autogen-core/samples/common/patterns/_group_chat_utils.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Credit to the original authors: https://github.com/microsoft/autogen/blob/main/autogen/agentchat/groupchat.py""" - -import re -from typing import Dict, List - -from autogen_core import AgentProxy -from autogen_core.model_context import ChatCompletionContext -from autogen_core.models import ChatCompletionClient, SystemMessage, UserMessage - - -async def select_speaker(context: ChatCompletionContext, client: ChatCompletionClient, agents: List[AgentProxy]) -> int: - """Selects the next speaker in a group chat using a ChatCompletion client.""" - # TODO: Handle multi-modal messages. - - # Construct formated current message history. 
- history_messages: List[str] = [] - for msg in await context.get_messages(): - assert isinstance(msg, UserMessage) and isinstance(msg.content, str) - history_messages.append(f"{msg.source}: {msg.content}") - history = "\n".join(history_messages) - - # Construct agent roles. - roles = "\n".join( - [f"{(await agent.metadata)['type']}: {(await agent.metadata)['description']}".strip() for agent in agents] - ) - - # Construct agent list. - participants = str([(await agent.metadata)["type"] for agent in agents]) - - # Select the next speaker. - select_speaker_prompt = f"""You are in a role play game. The following roles are available: -{roles}. -Read the following conversation. Then select the next role from {participants} to play. Only return the role. - -{history} - -Read the above conversation. Then select the next role from {participants} to play. Only return the role. -""" - select_speaker_messages = [SystemMessage(content=select_speaker_prompt)] - response = await client.create(messages=select_speaker_messages) - assert isinstance(response.content, str) - mentions = await mentioned_agents(response.content, agents) - if len(mentions) != 1: - raise ValueError(f"Expected exactly one agent to be mentioned, but got {mentions}") - agent_name = list(mentions.keys())[0] - # Get the index of the selected agent by name - agent_index = 0 - for i, agent in enumerate(agents): - if (await agent.metadata)["type"] == agent_name: - agent_index = i - break - - assert agent_index is not None - return agent_index - - -async def mentioned_agents(message_content: str, agents: List[AgentProxy]) -> Dict[str, int]: - """Counts the number of times each agent is mentioned in the provided message content. - Agent names will match under any of the following conditions (all case-sensitive): - - Exact name match - - If the agent name has underscores it will match with spaces instead (e.g. 
'Story_writer' == 'Story writer') - - If the agent name has underscores it will match with '\\_' instead of '_' (e.g. 'Story_writer' == 'Story\\_writer') - - Args: - message_content (Union[str, List]): The content of the message, either as a single string or a list of strings. - agents (List[Agent]): A list of Agent objects, each having a 'name' attribute to be searched in the message content. - - Returns: - Dict: a counter for mentioned agents. - """ - mentions: Dict[str, int] = dict() - for agent in agents: - # Finds agent mentions, taking word boundaries into account, - # accommodates escaping underscores and underscores as spaces - name = (await agent.metadata)["type"] - regex = ( - r"(?<=\W)(" - + re.escape(name) - + r"|" - + re.escape(name.replace("_", " ")) - + r"|" - + re.escape(name.replace("_", r"\_")) - + r")(?=\W)" - ) - count = len(re.findall(regex, f" {message_content} ")) # Pad the message to help with matching - if count > 0: - mentions[name] = count - return mentions diff --git a/python/packages/autogen-core/samples/common/types.py b/python/packages/autogen-core/samples/common/types.py deleted file mode 100644 index 5ffe1ca44a67..000000000000 --- a/python/packages/autogen-core/samples/common/types.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass, field -from enum import Enum -from typing import List, Union - -from autogen_core import FunctionCall, Image -from autogen_core.models import FunctionExecutionResultMessage - - -@dataclass(kw_only=True) -class BaseMessage: - # Name of the agent that sent this message - source: str - - -@dataclass -class TextMessage(BaseMessage): - content: str - - -@dataclass -class MultiModalMessage(BaseMessage): - content: List[Union[str, Image]] - - -@dataclass -class FunctionCallMessage(BaseMessage): - content: List[FunctionCall] - - -Message = Union[TextMessage, MultiModalMessage, FunctionCallMessage, FunctionExecutionResultMessage] - - -class 
ResponseFormat(Enum): - text = "text" - json_object = "json_object" - - -@dataclass -class RespondNow: - """A message to request a response from the addressed agent. The sender - expects a response upon sening and waits for it synchronously.""" - - response_format: ResponseFormat = field(default=ResponseFormat.text) - - -@dataclass -class PublishNow: - """A message to request an event to be published to the addressed agent. - Unlike RespondNow, the sender does not expect a response upon sending.""" - - response_format: ResponseFormat = field(default=ResponseFormat.text) - - -@dataclass -class Reset: ... - - -@dataclass -class ToolApprovalRequest: - """A message to request approval for a tool call. The sender expects a - response upon sending and waits for it synchronously.""" - - tool_call: FunctionCall - - -@dataclass -class ToolApprovalResponse: - """A message to respond to a tool approval request. The response is sent - synchronously.""" - - tool_call_id: str - approved: bool - reason: str diff --git a/python/pyproject.toml b/python/pyproject.toml index da884484083a..f3b5c9453aad 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -18,7 +18,8 @@ dev = [ "cookiecutter", "poethepoet", "tomli", - "tomli-w" + "tomli-w", + "chainlit", ] [tool.uv.workspace] @@ -70,6 +71,7 @@ disallow_any_unimported = true [tool.pyright] include = ["src", "tests", "samples"] +exclude = ["samples/core_xlang_hello_python_agent/protos"] typeCheckingMode = "strict" reportUnnecessaryIsInstance = false reportMissingTypeStubs = false @@ -82,14 +84,14 @@ pyright = "python run_task_in_pkgs_if_exist.py pyright" mypy = "python run_task_in_pkgs_if_exist.py mypy" test = "python run_task_in_pkgs_if_exist.py test" coverage = "python run_task_in_pkgs_if_exist.py coverage" +markdown-code-lint = """python check_md_code_blocks.py ../README.md ./packages/autogen-core/docs/src/**/*.md""" +samples-code-check = """pyright ./samples""" -check = ["fmt", "lint", "pyright", "mypy", "coverage"] 
+check = ["fmt", "lint", "pyright", "mypy", "coverage", "markdown-code-lint", "samples-code-check"] gen-proto = "python -m grpc_tools.protoc --python_out=./packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos --grpc_python_out=./packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos --mypy_out=./packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos --mypy_grpc_out=./packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos --proto_path ../protos/ agent_worker.proto --proto_path ../protos/ cloudevent.proto" -gen-proto-samples = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/samples/protos --grpc_python_out=./packages/autogen-core/samples/protos --mypy_out=./packages/autogen-core/samples/protos --mypy_grpc_out=./packages/autogen-core/samples/protos --proto_path ../protos/ agent_events.proto" - -markdown-code-lint = """python check_md_code_blocks.py ../README.md ./packages/autogen-core/docs/src/**/*.md""" +gen-proto-samples = "python -m grpc_tools.protoc --python_out=./samples/core_xlang_hello_python_agent/protos --grpc_python_out=./samples/core_xlang_hello_python_agent/protos --mypy_out=./samples/core_xlang_hello_python_agent/protos --mypy_grpc_out=./samples/core_xlang_hello_python_agent/protos --proto_path ../protos/ agent_events.proto" [[tool.poe.tasks.gen-test-proto.sequence]] cmd = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/tests/protos --grpc_python_out=./packages/autogen-core/tests/protos --mypy_out=./packages/autogen-core/tests/protos --mypy_grpc_out=./packages/autogen-core/tests/protos --proto_path ./packages/autogen-core/tests/protos serialization_test.proto" diff --git a/python/samples/agentchat_chainlit/app.py b/python/samples/agentchat_chainlit/app.py index 370b990b2adb..6d0bef38a2ac 100644 --- a/python/samples/agentchat_chainlit/app.py +++ b/python/samples/agentchat_chainlit/app.py @@ -1,21 +1,18 @@ import chainlit as cl from autogen_agentchat.agents import AssistantAgent -from 
autogen_agentchat.conditions import TextMentionTermination, MaxMessageTermination +from autogen_agentchat.base import TaskResult +from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination from autogen_agentchat.teams import RoundRobinGroupChat from autogen_ext.models.openai import OpenAIChatCompletionClient -from autogen_agentchat.base import TaskResult async def get_weather(city: str) -> str: return f"The weather in {city} is 73 degrees and Sunny." -@cl.on_chat_start +@cl.on_chat_start # type: ignore async def start_chat(): - cl.user_session.set( - "prompt_history", - "", - ) + cl.user_session.set("prompt_history", "") # type: ignore async def run_team(query: str): @@ -29,13 +26,13 @@ async def run_team(query: str): response_stream = team.run_stream(task=query) async for msg in response_stream: if hasattr(msg, "content"): - msg = cl.Message(content=msg.content, author="Agent Team") - await msg.send() + cl_msg = cl.Message(content=msg.content, author="Agent Team") # type: ignore + await cl_msg.send() if isinstance(msg, TaskResult): - msg = cl.Message(content="Termination condition met. Team and Agents are reset.", author="Agent Team") - await msg.send() + cl_msg = cl.Message(content="Termination condition met. 
Team and Agents are reset.", author="Agent Team") + await cl_msg.send() -@cl.on_message +@cl.on_message # type: ignore async def chat(message: cl.Message): - await run_team(message.content) + await run_team(message.content) # type: ignore diff --git a/python/samples/core_async_human_in_the_loop/.gitignore b/python/samples/core_async_human_in_the_loop/.gitignore new file mode 100644 index 000000000000..f228262f0a35 --- /dev/null +++ b/python/samples/core_async_human_in_the_loop/.gitignore @@ -0,0 +1 @@ +model_config.json \ No newline at end of file diff --git a/python/packages/autogen-core/samples/README.md b/python/samples/core_async_human_in_the_loop/README.md similarity index 62% rename from python/packages/autogen-core/samples/README.md rename to python/samples/core_async_human_in_the_loop/README.md index 77227169c213..f1fca50c9cf0 100644 --- a/python/packages/autogen-core/samples/README.md +++ b/python/samples/core_async_human_in_the_loop/README.md @@ -1,12 +1,6 @@ -# Samples +# Async Human-in-the-Loop Example -This directory contains sample apps that use AutoGen Core API. -See [core user guide](../docs/src/user-guide/core-user-guide/) for notebook examples. - -See [Running the examples](#running-the-examples) for instructions on how to run the examples. - -- [`chess_game.py`](chess_game.py): an example with two chess player agents that executes its own tools to demonstrate tool use and reflection on tool use. -- [`slow_human_in_loop.py`](slow_human_in_loop.py): an example showing human-in-the-loop which waits for human input before making the tool call. +An example showing human-in-the-loop which waits for human input before making the tool call. ## Running the examples @@ -14,6 +8,10 @@ See [Running the examples](#running-the-examples) for instructions on how to run First, you need a shell with AutoGen core and required dependencies installed. 
+```bash +pip install "autogen-core==0.4.0.dev13" "autogen-ext[openai,azure]==0.4.0.dev13" +``` + ### Using Azure OpenAI API For Azure OpenAI API, you need to set the following environment variables: diff --git a/python/packages/autogen-core/samples/slow_human_in_loop.py b/python/samples/core_async_human_in_the_loop/main.py similarity index 95% rename from python/packages/autogen-core/samples/slow_human_in_loop.py rename to python/samples/core_async_human_in_the_loop/main.py index 8762b588a3bf..ccae231b2118 100644 --- a/python/packages/autogen-core/samples/slow_human_in_loop.py +++ b/python/samples/core_async_human_in_the_loop/main.py @@ -28,7 +28,7 @@ import json from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass -from typing import Any, Mapping, Optional +from typing import Any, Dict, Mapping, Optional from autogen_core import ( CancellationToken, @@ -49,11 +49,15 @@ UserMessage, ) from autogen_core.tools import BaseTool -from common.types import TextMessage -from common.utils import get_chat_completion_client_from_envs from pydantic import BaseModel, Field +@dataclass +class TextMessage: + source: str + content: str + + @dataclass class UserTextMessage(TextMessage): pass @@ -108,7 +112,7 @@ async def handle_message(self, message: AssistantTextMessage, ctx: MessageContex async def save_state(self) -> Mapping[str, Any]: state_to_save = { - "memory": self._model_context.save_state(), + "memory": await self._model_context.save_state(), } return state_to_save @@ -246,7 +250,7 @@ def termination_msg(self) -> str | None: return self.terminateMessage.content -async def main(latest_user_input: Optional[str] = None) -> None | str: +async def main(model_config: Dict[str, Any], latest_user_input: Optional[str] = None) -> None | str: """ Asynchronous function that serves as the entry point of the program. This function initializes the necessary components for the program and registers the user and scheduling assistant agents. 
@@ -263,6 +267,8 @@ async def main(latest_user_input: Optional[str] = None) -> None | str: """ global state_persister + model_client = ChatCompletionClient.load_component(model_config) + termination_handler = TerminationHandler() needs_user_input_handler = NeedsUserInputHandler() runtime = SingleThreadedAgentRuntime(intervention_handlers=[needs_user_input_handler, termination_handler]) @@ -278,11 +284,12 @@ async def main(latest_user_input: Optional[str] = None) -> None | str: lambda: SchedulingAssistantAgent( "SchedulingAssistant", description="AI that helps you schedule meetings", - model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"), + model_client=model_client, initial_message=initial_schedule_assistant_message, ), ) + runtime_initiation_message: UserTextMessage | AssistantTextMessage if latest_user_input is not None: runtime_initiation_message = UserTextMessage(content=latest_user_input, source="User") else: @@ -325,6 +332,9 @@ async def ainput(prompt: str = "") -> str: # if os.path.exists("state.json"): # os.remove("state.json") + with open("model_config.json") as f: + model_config = json.load(f) + def get_user_input(question_for_user: str): print("--------------------------QUESTION_FOR_USER--------------------------") print(question_for_user) @@ -337,7 +347,7 @@ async def run_main(question_for_user: str | None = None): user_input = get_user_input(question_for_user) else: user_input = None - user_input_needed = await main(user_input) + user_input_needed = await main(model_config, user_input) if user_input_needed: await run_main(user_input_needed) diff --git a/python/samples/core_async_human_in_the_loop/model_config_template.json b/python/samples/core_async_human_in_the_loop/model_config_template.json new file mode 100644 index 000000000000..bfa6913900ae --- /dev/null +++ b/python/samples/core_async_human_in_the_loop/model_config_template.json @@ -0,0 +1,38 @@ +// Use Azure OpenAI with AD token provider. 
+// { +// "provider": "AzureOpenAIChatCompletionClient", +// "config": { +// "model": "gpt-4o-2024-05-13", +// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/", +// "azure_deployment": "{your-azure-deployment}", +// "api_version": "2024-06-01", +// "azure_ad_token_provider": { +// "provider": "autogen_ext.models.openai.AzureTokenProvider", +// "config": { +// "provider_kind": "DefaultAzureCredential", +// "scopes": [ +// "https://cognitiveservices.azure.com/.default" +// ] +// } +// } +// } +// } +// Use Azure Open AI with key +// { +// "provider": "AzureOpenAIChatCompletionClient", +// "config": { +// "model": "gpt-4o-2024-05-13", +// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/", +// "azure_deployment": "{your-azure-deployment}", +// "api_version": "2024-06-01", +// "api_key": "REPLACE_WITH_YOUR_API_KEY" +// } +// } +// Use Open AI with key +{ + "provider": "OpenAIChatCompletionClient", + "config": { + "model": "gpt-4o-2024-05-13", + "api_key": "REPLACE_WITH_YOUR_API_KEY" + } +} \ No newline at end of file diff --git a/python/samples/core_async_human_in_the_loop/utils.py b/python/samples/core_async_human_in_the_loop/utils.py new file mode 100644 index 000000000000..ee412c5eefe8 --- /dev/null +++ b/python/samples/core_async_human_in_the_loop/utils.py @@ -0,0 +1,47 @@ +import os +from typing import Any + +from autogen_core.models import ( + ChatCompletionClient, +) +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient +from azure.identity import DefaultAzureCredential, get_bearer_token_provider + + +def get_chat_completion_client_from_envs(**kwargs: Any) -> ChatCompletionClient: + # Check API type. + api_type = os.getenv("OPENAI_API_TYPE", "openai") + if api_type == "openai": + # Check API key. 
+        api_key = os.getenv("OPENAI_API_KEY")
+        if api_key is None:
+            raise ValueError("OPENAI_API_KEY is not set")
+        kwargs["api_key"] = api_key
+        return OpenAIChatCompletionClient(**kwargs)
+    elif api_type == "azure":
+        # Check Azure API key.
+        azure_api_key = os.getenv("AZURE_OPENAI_API_KEY")
+        if azure_api_key is not None:
+            kwargs["api_key"] = azure_api_key
+        else:
+            # Try to use token from Azure CLI.
+            token_provider = get_bearer_token_provider(
+                DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+            )
+            kwargs["azure_ad_token_provider"] = token_provider
+        # Check Azure API endpoint.
+        azure_api_endpoint = os.getenv("AZURE_OPENAI_API_ENDPOINT")
+        if azure_api_endpoint is None:
+            raise ValueError("AZURE_OPENAI_API_ENDPOINT is not set")
+        kwargs["azure_endpoint"] = azure_api_endpoint
+        # Get Azure API version.
+        kwargs["api_version"] = os.getenv("AZURE_OPENAI_API_VERSION", "2024-06-01")
+        # Set model capabilities.
+        if "model_capabilities" not in kwargs or kwargs["model_capabilities"] is None:
+            kwargs["model_capabilities"] = {
+                "vision": True,
+                "function_calling": True,
+                "json_output": True,
+            }
+        return AzureOpenAIChatCompletionClient(**kwargs)  # type: ignore
+    raise ValueError(f"Unknown API type: {api_type}")
diff --git a/python/samples/core_chess_game/.gitignore b/python/samples/core_chess_game/.gitignore
new file mode 100644
index 000000000000..f228262f0a35
--- /dev/null
+++ b/python/samples/core_chess_game/.gitignore
@@ -0,0 +1 @@
+model_config.json
\ No newline at end of file
diff --git a/python/samples/core_chess_game/README.md b/python/samples/core_chess_game/README.md
new file mode 100644
index 000000000000..b0f5a9194a2d
--- /dev/null
+++ b/python/samples/core_chess_game/README.md
@@ -0,0 +1,23 @@
+# Chess Game Example
+
+An example with two chess player agents that execute their own tools to demonstrate tool use and reflection on tool use.
+
+## Running the example
+
+### Prerequisites
+
+First, you need a shell with AutoGen core and required dependencies installed.
+
+```bash
+pip install "autogen-core==0.4.0.dev13" "autogen-ext[openai,azure]==0.4.0.dev13" "chess"
+```
+### Model Configuration
+
+The model configuration should be defined in a `model_config.json` file.
+Use `model_config_template.json` as a template.
+
+### Running the example
+
+```bash
+python main.py
+```
\ No newline at end of file
diff --git a/python/packages/autogen-core/samples/chess_game.py b/python/samples/core_chess_game/main.py
similarity index 59%
rename from python/packages/autogen-core/samples/chess_game.py
rename to python/samples/core_chess_game/main.py
index b359772aa460..78b4787750e0 100644
--- a/python/packages/autogen-core/samples/chess_game.py
+++ b/python/samples/core_chess_game/main.py
@@ -4,26 +4,70 @@
 import argparse
 import asyncio
+import json
 import logging
-from typing import Annotated, Literal
+from typing import Annotated, Any, Dict, List, Literal
 
 from autogen_core import (
     AgentId,
-    AgentInstantiationContext,
     AgentRuntime,
-    DefaultSubscription,
     DefaultTopicId,
+    MessageContext,
+    RoutedAgent,
     SingleThreadedAgentRuntime,
+    default_subscription,
+    message_handler,
 )
-from autogen_core.model_context import BufferedChatCompletionContext
-from autogen_core.models import SystemMessage
-from autogen_core.tools import FunctionTool
+from autogen_core.model_context import BufferedChatCompletionContext, ChatCompletionContext
+from autogen_core.models import AssistantMessage, ChatCompletionClient, LLMMessage, SystemMessage, UserMessage
+from autogen_core.tool_agent import ToolAgent, tool_agent_caller_loop
+from autogen_core.tools import FunctionTool, Tool, ToolSchema
 from chess import BLACK, SQUARE_NAMES, WHITE, Board, Move
 from chess import piece_name as get_piece_name
-from common.agents._chat_completion_agent import ChatCompletionAgent
-from common.patterns._group_chat_manager import GroupChatManager
-from 
common.types import TextMessage -from common.utils import get_chat_completion_client_from_envs +from pydantic import BaseModel + + +class TextMessage(BaseModel): + source: str + content: str + + +@default_subscription +class ToolUseAgent(RoutedAgent): + def __init__( + self, + description: str, + instructions: str, + model_client: ChatCompletionClient, + model_context: ChatCompletionContext, + tool_schema: List[ToolSchema], + tool_agent_type: str, + ) -> None: + super().__init__(description=description) + self._system_messages: List[LLMMessage] = [SystemMessage(content=instructions)] + self._model_client = model_client + self._tool_schema = tool_schema + self._tool_agent_id = AgentId(tool_agent_type, self.id.key) + self._model_context = model_context + + @message_handler + async def handle_message(self, message: TextMessage, ctx: MessageContext) -> None: + # Add the user message to the model context. + await self._model_context.add_message(UserMessage(content=message.content, source=message.source)) + # Run the caller loop to handle tool calls. + messages = await tool_agent_caller_loop( + self, + tool_agent_id=self._tool_agent_id, + model_client=self._model_client, + input_messages=(await self._model_context.get_messages()), + tool_schema=self._tool_schema, + cancellation_token=ctx.cancellation_token, + ) + assert isinstance(messages[-1].content, str) + # Add the assistant message to the model context. + await self._model_context.add_message(AssistantMessage(content=messages[-1].content, source=self.id.type)) + # Publish the final response. + await self.publish_message(TextMessage(content=messages[-1].content, source=self.id.type), DefaultTopicId()) def validate_turn(board: Board, player: Literal["white", "black"]) -> None: @@ -90,29 +134,25 @@ def make_move( return f"Moved {piece_name} ({piece_symbol}) from {SQUARE_NAMES[new_move.from_square]} to {SQUARE_NAMES[new_move.to_square]}." 
-async def chess_game(runtime: AgentRuntime) -> None: # type: ignore +async def chess_game(runtime: AgentRuntime, model_config: Dict[str, Any]) -> None: # type: ignore """Create agents for a chess game and return the group chat.""" # Create the board. board = Board() # Create tools for each player. - # @functools.wraps(get_legal_moves) def get_legal_moves_black() -> str: return get_legal_moves(board, "black") - # @functools.wraps(get_legal_moves) def get_legal_moves_white() -> str: return get_legal_moves(board, "white") - # @functools.wraps(make_move) def make_move_black( thinking: Annotated[str, "Thinking for the move"], move: Annotated[str, "A move in UCI format"], ) -> str: return make_move(board, "black", thinking, move) - # @functools.wraps(make_move) def make_move_white( thinking: Annotated[str, "Thinking for the move"], move: Annotated[str, "A move in UCI format"], @@ -122,7 +162,7 @@ def make_move_white( def get_board_text() -> Annotated[str, "The current board state"]: return get_board(board) - black_tools = [ + black_tools: List[Tool] = [ FunctionTool( get_legal_moves_black, name="get_legal_moves", @@ -140,7 +180,7 @@ def get_board_text() -> Annotated[str, "The current board state"]: ), ] - white_tools = [ + white_tools: List[Tool] = [ FunctionTool( get_legal_moves_white, name="get_legal_moves", @@ -158,73 +198,59 @@ def get_board_text() -> Annotated[str, "The current board state"]: ), ] - await ChatCompletionAgent.register( + model_client = ChatCompletionClient.load_component(model_config) + + # Register the agents. + await ToolAgent.register( + runtime, + "ToolAgent", + lambda: ToolAgent(description="Tool agent for chess game.", tools=black_tools + white_tools), + ) + + await ToolUseAgent.register( runtime, "PlayerBlack", - lambda: ChatCompletionAgent( + lambda: ToolUseAgent( description="Player playing black.", - system_messages=[ - SystemMessage( - content="You are a chess player and you play as black. 
" - "Use get_legal_moves() to get list of legal moves. " - "Use get_board() to get the current board state. " - "Think about your strategy and call make_move(thinking, move) to make a move." - ), - ], + instructions="You are a chess player and you play as black. " + "Use get_legal_moves() to get list of legal moves. " + "Use get_board() to get the current board state. " + "Think about your strategy and call make_move(thinking, move) to make a move.", + model_client=model_client, model_context=BufferedChatCompletionContext(buffer_size=10), - model_client=get_chat_completion_client_from_envs(model="gpt-4o"), - tools=black_tools, + tool_schema=[tool.schema for tool in black_tools], + tool_agent_type="ToolAgent", ), ) - await runtime.add_subscription(DefaultSubscription(agent_type="PlayerBlack")) - await ChatCompletionAgent.register( + await ToolUseAgent.register( runtime, "PlayerWhite", - lambda: ChatCompletionAgent( + lambda: ToolUseAgent( description="Player playing white.", - system_messages=[ - SystemMessage( - content="You are a chess player and you play as white. " - "Use get_legal_moves() to get list of legal moves. " - "Use get_board() to get the current board state. " - "Think about your strategy and call make_move(thinking, move) to make a move." - ), - ], + instructions="You are a chess player and you play as white. " + "Use get_legal_moves() to get list of legal moves. " + "Use get_board() to get the current board state. 
" + "Think about your strategy and call make_move(thinking, move) to make a move.", + model_client=model_client, model_context=BufferedChatCompletionContext(buffer_size=10), - model_client=get_chat_completion_client_from_envs(model="gpt-4o"), - tools=white_tools, + tool_schema=[tool.schema for tool in white_tools], + tool_agent_type="ToolAgent", ), ) - await runtime.add_subscription(DefaultSubscription(agent_type="PlayerWhite")) - - # Create a group chat manager for the chess game to orchestrate a turn-based - # conversation between the two agents. - await GroupChatManager.register( - runtime, - "ChessGame", - lambda: GroupChatManager( - description="A chess game between two agents.", - model_context=BufferedChatCompletionContext(buffer_size=10), - participants=[ - AgentId("PlayerWhite", AgentInstantiationContext.current_agent_id().key), - AgentId("PlayerBlack", AgentInstantiationContext.current_agent_id().key), - ], # white goes first - ), - ) - await runtime.add_subscription(DefaultSubscription(agent_type="ChessGame")) -async def main() -> None: +async def main(model_config: Dict[str, Any]) -> None: """Main Entrypoint.""" runtime = SingleThreadedAgentRuntime() - await chess_game(runtime) + await chess_game(runtime, model_config) runtime.start() # Publish an initial message to trigger the group chat manager to start # orchestration. - await runtime.publish_message( + # Send an initial message to player white to start the game. 
+ await runtime.send_message( TextMessage(content="Game started.", source="System"), - topic_id=DefaultTopicId(), + AgentId("PlayerWhite", "default"), ) await runtime.stop_when_idle() @@ -232,6 +258,9 @@ async def main() -> None: if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run a chess game between two agents.") parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.") + parser.add_argument( + "--model-config", type=str, help="Path to the model configuration file.", default="model_config.json" + ) args = parser.parse_args() if args.verbose: logging.basicConfig(level=logging.WARNING) @@ -239,4 +268,6 @@ async def main() -> None: handler = logging.FileHandler("chess_game.log") logging.getLogger("autogen_core").addHandler(handler) - asyncio.run(main()) + with open(args.model_config, "r") as f: + model_config = json.load(f) + asyncio.run(main(model_config)) diff --git a/python/samples/core_chess_game/model_config_template.json b/python/samples/core_chess_game/model_config_template.json new file mode 100644 index 000000000000..bfa6913900ae --- /dev/null +++ b/python/samples/core_chess_game/model_config_template.json @@ -0,0 +1,38 @@ +// Use Azure OpenAI with AD token provider. 
+// { +// "provider": "AzureOpenAIChatCompletionClient", +// "config": { +// "model": "gpt-4o-2024-05-13", +// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/", +// "azure_deployment": "{your-azure-deployment}", +// "api_version": "2024-06-01", +// "azure_ad_token_provider": { +// "provider": "autogen_ext.models.openai.AzureTokenProvider", +// "config": { +// "provider_kind": "DefaultAzureCredential", +// "scopes": [ +// "https://cognitiveservices.azure.com/.default" +// ] +// } +// } +// } +// } +// Use Azure Open AI with key +// { +// "provider": "AzureOpenAIChatCompletionClient", +// "config": { +// "model": "gpt-4o-2024-05-13", +// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/", +// "azure_deployment": "{your-azure-deployment}", +// "api_version": "2024-06-01", +// "api_key": "REPLACE_WITH_YOUR_API_KEY" +// } +// } +// Use Open AI with key +{ + "provider": "OpenAIChatCompletionClient", + "config": { + "model": "gpt-4o-2024-05-13", + "api_key": "REPLACE_WITH_YOUR_API_KEY" + } +} \ No newline at end of file diff --git a/python/packages/autogen-core/samples/common/utils.py b/python/samples/core_chess_game/utils.py similarity index 66% rename from python/packages/autogen-core/samples/common/utils.py rename to python/samples/core_chess_game/utils.py index d43283ab79a4..5fc21dc20401 100644 --- a/python/packages/autogen-core/samples/common/utils.py +++ b/python/samples/core_chess_game/utils.py @@ -1,19 +1,15 @@ -import os -from typing import Any, List, Optional, Union +from typing import List, Optional, Union from autogen_core.models import ( AssistantMessage, - ChatCompletionClient, FunctionExecutionResult, FunctionExecutionResultMessage, LLMMessage, UserMessage, ) -from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient -from azure.identity import DefaultAzureCredential, get_bearer_token_provider from typing_extensions import Literal -from .types import ( +from .messages import ( 
FunctionCallMessage, Message, MultiModalMessage, @@ -100,42 +96,3 @@ def convert_messages_to_llm_messages( raise AssertionError("unreachable") return result - - -def get_chat_completion_client_from_envs(**kwargs: Any) -> ChatCompletionClient: - # Check API type. - api_type = os.getenv("OPENAI_API_TYPE", "openai") - if api_type == "openai": - # Check API key. - api_key = os.getenv("OPENAI_API_KEY") - if api_key is None: - raise ValueError("OPENAI_API_KEY is not set") - kwargs["api_key"] = api_key - return OpenAIChatCompletionClient(**kwargs) - elif api_type == "azure": - # Check Azure API key. - azure_api_key = os.getenv("AZURE_OPENAI_API_KEY") - if azure_api_key is not None: - kwargs["api_key"] = azure_api_key - else: - # Try to use token from Azure CLI. - token_provider = get_bearer_token_provider( - DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" - ) - kwargs["azure_ad_token_provider"] = token_provider - # Check Azure API endpoint. - azure_api_endpoint = os.getenv("AZURE_OPENAI_API_ENDPOINT") - if azure_api_endpoint is None: - raise ValueError("AZURE_OPENAI_API_ENDPOINT is not set") - kwargs["azure_endpoint"] = azure_api_endpoint - # Get Azure API version. - kwargs["api_version"] = os.getenv("AZURE_OPENAI_API_VERSION", "2024-06-01") - # Set model capabilities. 
- if "model_capabilities" not in kwargs or kwargs["model_capabilities"] is None: - kwargs["model_capabilities"] = { - "vision": True, - "function_calling": True, - "json_output": True, - } - return AzureOpenAIChatCompletionClient(**kwargs) # type: ignore - raise ValueError(f"Unknown API type: {api_type}") diff --git a/python/packages/autogen-core/samples/distributed-group-chat/.gitignore b/python/samples/core_distributed-group-chat/.gitignore similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/.gitignore rename to python/samples/core_distributed-group-chat/.gitignore diff --git a/python/packages/autogen-core/samples/distributed-group-chat/README.md b/python/samples/core_distributed-group-chat/README.md similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/README.md rename to python/samples/core_distributed-group-chat/README.md diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_agents.py b/python/samples/core_distributed-group-chat/_agents.py similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/_agents.py rename to python/samples/core_distributed-group-chat/_agents.py diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_types.py b/python/samples/core_distributed-group-chat/_types.py similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/_types.py rename to python/samples/core_distributed-group-chat/_types.py diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_utils.py b/python/samples/core_distributed-group-chat/_utils.py similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/_utils.py rename to python/samples/core_distributed-group-chat/_utils.py diff --git a/python/packages/autogen-core/samples/distributed-group-chat/config.yaml b/python/samples/core_distributed-group-chat/config.yaml similarity index 
100% rename from python/packages/autogen-core/samples/distributed-group-chat/config.yaml rename to python/samples/core_distributed-group-chat/config.yaml diff --git a/python/packages/autogen-core/samples/distributed-group-chat/public/avatars/editor.png b/python/samples/core_distributed-group-chat/public/avatars/editor.png similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/public/avatars/editor.png rename to python/samples/core_distributed-group-chat/public/avatars/editor.png diff --git a/python/packages/autogen-core/samples/distributed-group-chat/public/avatars/group_chat_manager.png b/python/samples/core_distributed-group-chat/public/avatars/group_chat_manager.png similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/public/avatars/group_chat_manager.png rename to python/samples/core_distributed-group-chat/public/avatars/group_chat_manager.png diff --git a/python/packages/autogen-core/samples/distributed-group-chat/public/avatars/user.png b/python/samples/core_distributed-group-chat/public/avatars/user.png similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/public/avatars/user.png rename to python/samples/core_distributed-group-chat/public/avatars/user.png diff --git a/python/packages/autogen-core/samples/distributed-group-chat/public/avatars/writer.png b/python/samples/core_distributed-group-chat/public/avatars/writer.png similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/public/avatars/writer.png rename to python/samples/core_distributed-group-chat/public/avatars/writer.png diff --git a/python/packages/autogen-core/samples/distributed-group-chat/public/favicon.png b/python/samples/core_distributed-group-chat/public/favicon.png similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/public/favicon.png rename to 
python/samples/core_distributed-group-chat/public/favicon.png diff --git a/python/packages/autogen-core/samples/distributed-group-chat/public/logo.png b/python/samples/core_distributed-group-chat/public/logo.png similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/public/logo.png rename to python/samples/core_distributed-group-chat/public/logo.png diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run.sh b/python/samples/core_distributed-group-chat/run.sh similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/run.sh rename to python/samples/core_distributed-group-chat/run.sh diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py b/python/samples/core_distributed-group-chat/run_editor_agent.py similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py rename to python/samples/core_distributed-group-chat/run_editor_agent.py diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py b/python/samples/core_distributed-group-chat/run_group_chat_manager.py similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py rename to python/samples/core_distributed-group-chat/run_group_chat_manager.py diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_host.py b/python/samples/core_distributed-group-chat/run_host.py similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/run_host.py rename to python/samples/core_distributed-group-chat/run_host.py diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_ui.py b/python/samples/core_distributed-group-chat/run_ui.py similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/run_ui.py rename to 
python/samples/core_distributed-group-chat/run_ui.py diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py b/python/samples/core_distributed-group-chat/run_writer_agent.py similarity index 100% rename from python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py rename to python/samples/core_distributed-group-chat/run_writer_agent.py diff --git a/python/packages/autogen-core/samples/worker/agents.py b/python/samples/core_grpc_worker_runtime/agents.py similarity index 100% rename from python/packages/autogen-core/samples/worker/agents.py rename to python/samples/core_grpc_worker_runtime/agents.py diff --git a/python/packages/autogen-core/samples/worker/run_cascading_publisher.py b/python/samples/core_grpc_worker_runtime/run_cascading_publisher.py similarity index 100% rename from python/packages/autogen-core/samples/worker/run_cascading_publisher.py rename to python/samples/core_grpc_worker_runtime/run_cascading_publisher.py diff --git a/python/packages/autogen-core/samples/worker/run_cascading_worker.py b/python/samples/core_grpc_worker_runtime/run_cascading_worker.py similarity index 100% rename from python/packages/autogen-core/samples/worker/run_cascading_worker.py rename to python/samples/core_grpc_worker_runtime/run_cascading_worker.py diff --git a/python/packages/autogen-core/samples/worker/run_host.py b/python/samples/core_grpc_worker_runtime/run_host.py similarity index 100% rename from python/packages/autogen-core/samples/worker/run_host.py rename to python/samples/core_grpc_worker_runtime/run_host.py diff --git a/python/packages/autogen-core/samples/worker/run_worker_pub_sub.py b/python/samples/core_grpc_worker_runtime/run_worker_pub_sub.py similarity index 100% rename from python/packages/autogen-core/samples/worker/run_worker_pub_sub.py rename to python/samples/core_grpc_worker_runtime/run_worker_pub_sub.py diff --git a/python/packages/autogen-core/samples/worker/run_worker_rpc.py 
b/python/samples/core_grpc_worker_runtime/run_worker_rpc.py similarity index 100% rename from python/packages/autogen-core/samples/worker/run_worker_rpc.py rename to python/samples/core_grpc_worker_runtime/run_worker_rpc.py diff --git a/python/packages/autogen-core/samples/semantic_router/README.md b/python/samples/core_semantic_router/README.md similarity index 100% rename from python/packages/autogen-core/samples/semantic_router/README.md rename to python/samples/core_semantic_router/README.md diff --git a/python/packages/autogen-core/samples/semantic_router/_agents.py b/python/samples/core_semantic_router/_agents.py similarity index 100% rename from python/packages/autogen-core/samples/semantic_router/_agents.py rename to python/samples/core_semantic_router/_agents.py diff --git a/python/packages/autogen-core/samples/semantic_router/_semantic_router_agent.py b/python/samples/core_semantic_router/_semantic_router_agent.py similarity index 100% rename from python/packages/autogen-core/samples/semantic_router/_semantic_router_agent.py rename to python/samples/core_semantic_router/_semantic_router_agent.py diff --git a/python/packages/autogen-core/samples/semantic_router/_semantic_router_components.py b/python/samples/core_semantic_router/_semantic_router_components.py similarity index 100% rename from python/packages/autogen-core/samples/semantic_router/_semantic_router_components.py rename to python/samples/core_semantic_router/_semantic_router_components.py diff --git a/python/packages/autogen-core/samples/semantic_router/run_host.py b/python/samples/core_semantic_router/run_host.py similarity index 100% rename from python/packages/autogen-core/samples/semantic_router/run_host.py rename to python/samples/core_semantic_router/run_host.py diff --git a/python/packages/autogen-core/samples/semantic_router/run_semantic_router.py b/python/samples/core_semantic_router/run_semantic_router.py similarity index 100% rename from 
python/packages/autogen-core/samples/semantic_router/run_semantic_router.py rename to python/samples/core_semantic_router/run_semantic_router.py diff --git a/python/packages/autogen-core/samples/xlang/hello_python_agent/README.md b/python/samples/core_xlang_hello_python_agent/README.md similarity index 100% rename from python/packages/autogen-core/samples/xlang/hello_python_agent/README.md rename to python/samples/core_xlang_hello_python_agent/README.md diff --git a/python/packages/autogen-core/samples/xlang/hello_python_agent/hello_python_agent.py b/python/samples/core_xlang_hello_python_agent/hello_python_agent.py similarity index 100% rename from python/packages/autogen-core/samples/xlang/hello_python_agent/hello_python_agent.py rename to python/samples/core_xlang_hello_python_agent/hello_python_agent.py diff --git a/python/packages/autogen-core/samples/protos/__init__.py b/python/samples/core_xlang_hello_python_agent/protos/__init__.py similarity index 100% rename from python/packages/autogen-core/samples/protos/__init__.py rename to python/samples/core_xlang_hello_python_agent/protos/__init__.py diff --git a/python/packages/autogen-core/samples/protos/agent_events_pb2.py b/python/samples/core_xlang_hello_python_agent/protos/agent_events_pb2.py similarity index 100% rename from python/packages/autogen-core/samples/protos/agent_events_pb2.py rename to python/samples/core_xlang_hello_python_agent/protos/agent_events_pb2.py diff --git a/python/packages/autogen-core/samples/protos/agent_events_pb2.pyi b/python/samples/core_xlang_hello_python_agent/protos/agent_events_pb2.pyi similarity index 100% rename from python/packages/autogen-core/samples/protos/agent_events_pb2.pyi rename to python/samples/core_xlang_hello_python_agent/protos/agent_events_pb2.pyi diff --git a/python/packages/autogen-core/samples/protos/agent_events_pb2_grpc.py b/python/samples/core_xlang_hello_python_agent/protos/agent_events_pb2_grpc.py similarity index 100% rename from 
python/packages/autogen-core/samples/protos/agent_events_pb2_grpc.py rename to python/samples/core_xlang_hello_python_agent/protos/agent_events_pb2_grpc.py diff --git a/python/packages/autogen-core/samples/protos/agent_events_pb2_grpc.pyi b/python/samples/core_xlang_hello_python_agent/protos/agent_events_pb2_grpc.pyi similarity index 100% rename from python/packages/autogen-core/samples/protos/agent_events_pb2_grpc.pyi rename to python/samples/core_xlang_hello_python_agent/protos/agent_events_pb2_grpc.pyi diff --git a/python/packages/autogen-core/samples/xlang/hello_python_agent/user_input.py b/python/samples/core_xlang_hello_python_agent/user_input.py similarity index 100% rename from python/packages/autogen-core/samples/xlang/hello_python_agent/user_input.py rename to python/samples/core_xlang_hello_python_agent/user_input.py diff --git a/python/uv.lock b/python/uv.lock index 84913dfed925..9f8ed0980e56 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -30,6 +30,7 @@ members = [ [manifest.dependency-groups] dev = [ + { name = "chainlit" }, { name = "cookiecutter" }, { name = "grpcio-tools", specifier = "~=1.62.0" }, { name = "mypy", specifier = "==1.13.0" }, @@ -110,11 +111,11 @@ wheels = [ [[package]] name = "aiofiles" -version = "24.1.0" +version = "23.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0b/03/a88171e277e8caa88a4c77808c20ebb04ba74cc4681bf1e9416c862de237/aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c", size = 30247 } +sdist = { url = "https://files.pythonhosted.org/packages/af/41/cfed10bc64d774f497a86e5ede9248e1d062db675504b41c320954d99641/aiofiles-23.2.1.tar.gz", hash = "sha256:84ec2218d8419404abcb9f0c02df3f34c6e0a68ed41072acfb1cef5cbc29051a", size = 32072 } wheels = [ - { url = "https://files.pythonhosted.org/packages/a5/45/30bb92d442636f570cb5651bc661f52b610e2eec3f891a5dc3a4c3667db0/aiofiles-24.1.0-py3-none-any.whl", hash = 
"sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5", size = 15896 }, + { url = "https://files.pythonhosted.org/packages/c5/19/5af6804c4cc0fed83f47bff6e413a98a36618e7d40185cd36e69737f3b0e/aiofiles-23.2.1-py3-none-any.whl", hash = "sha256:19297512c647d4b27a2cf7c34caa7e405c0d60b5560618a29a9fe027b18b0107", size = 15727 }, ] [[package]] @@ -300,6 +301,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a7/fa/e01228c2938de91d47b307831c62ab9e4001e747789d0b05baf779a6488c/async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028", size = 5721 }, ] +[[package]] +name = "asyncer" +version = "0.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/29/245ba9fa5769a1e3226c1157aedb372fe9dab28c4e1dcf6911d84d3a5e04/asyncer-0.0.7.tar.gz", hash = "sha256:d5e563fb0f56eb87b97257984703658a4f5bbdb52ff851b3e8ed864cc200b1d2", size = 14437 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3e/4b/40a1dc52fc26695b1e80a9e67dfb0fe7e6ddc57bbc5b61348e40c0045abb/asyncer-0.0.7-py3-none-any.whl", hash = "sha256:f0d579d4f67c4ead52ede3a45c854f462cae569058a8a6a68a4ebccac1c335d8", size = 8476 }, +] + [[package]] name = "asyncio-atexit" version = "1.0.1" @@ -755,6 +768,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b1/fe/e8c672695b37eecc5cbf43e1d0638d88d66ba3a44c4d321c796f4e59167f/beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed", size = 147925 }, ] +[[package]] +name = "bidict" +version = "0.23.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9a/6e/026678aa5a830e07cd9498a05d3e7e650a4f56a42f267a53d22bcda1bdc9/bidict-0.23.1.tar.gz", hash = "sha256:03069d763bc387bbd20e7d49914e75fc4132a41937fa3405417e1a5a2d006d71", size = 29093 } +wheels = [ + 
{ url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764 }, +] + [[package]] name = "binaryornot" version = "0.4.4" @@ -822,6 +844,40 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, ] +[[package]] +name = "chainlit" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiofiles" }, + { name = "asyncer" }, + { name = "click" }, + { name = "dataclasses-json" }, + { name = "fastapi" }, + { name = "filetype" }, + { name = "httpx" }, + { name = "lazify" }, + { name = "literalai" }, + { name = "nest-asyncio" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "pyjwt" }, + { name = "python-dotenv" }, + { name = "python-multipart" }, + { name = "python-socketio" }, + { name = "starlette" }, + { name = "syncer" }, + { name = "tomli" }, + { name = "uptrace" }, + { name = "uvicorn" }, + { name = "watchfiles" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/45/24/424679b769664876093b3e42167911535d1739bc1bc88f3963c69affed9e/chainlit-2.0.0.tar.gz", hash = "sha256:47b3a274a20cefb443f356d69f1c6a48818d67eb4a11552c749bfa6f414423ed", size = 4637040 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/2a/e2bbb86fc3a34c7bf798644edb95bf14fd79a8b3f6c99e4b27e5df1e24f0/chainlit-2.0.0-py3-none-any.whl", hash = "sha256:2b58ac6b513d94aef0380d1d68b73f74718c0c844586b050ce8d5e0a82eb8133", size = 4703622 }, +] + [[package]] name = "chardet" version = "5.2.0" @@ -891,6 +947,15 @@ version = "1.11.1" source = { registry = "https://pypi.org/simple" } sdist = { url = 
"https://files.pythonhosted.org/packages/74/16/53b895bb4fccede8e506de820fa94db03a2dc8bd2ca4bec0aac4a112fb65/chess-1.11.1.tar.gz", hash = "sha256:b7f66a32dc599ab260e2b688e6ac4e868dad840377a54b61357e2dec2a5fed00", size = 156529 } +[[package]] +name = "chevron" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/1f/ca74b65b19798895d63a6e92874162f44233467c9e7c1ed8afd19016ebe9/chevron-0.14.0.tar.gz", hash = "sha256:87613aafdf6d77b6a90ff073165a61ae5086e21ad49057aa0e53681601800ebf", size = 11440 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/93/342cc62a70ab727e093ed98e02a725d85b746345f05d2b5e5034649f4ec8/chevron-0.14.0-py3-none-any.whl", hash = "sha256:fbf996a709f8da2e745ef763f482ce2d311aa817d287593a5b990d6d6e4f0443", size = 11595 }, +] + [[package]] name = "chromedriver-autoinstaller" version = "0.6.4" @@ -1345,6 +1410,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/da/71/ae30dadffc90b9006d77af76b393cb9dfbfc9629f339fc1574a1c52e6806/future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216", size = 491326 }, ] +[[package]] +name = "googleapis-common-protos" +version = "1.66.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/a7/8e9cccdb1c49870de6faea2a2764fa23f627dd290633103540209f03524c/googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c", size = 114376 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/0f/c0713fb2b3d28af4b2fded3291df1c4d4f79a00d15c2374a9e010870016c/googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed", size = 221682 }, +] + [[package]] name = "greenlet" version = "3.1.1" @@ -1955,6 +2032,15 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/71/fd/7713b0e737f4e171112e44134790823ccec4aabe31f07d6e836fcbeb3b8a/langsmith-0.1.137-py3-none-any.whl", hash = "sha256:4256d5c61133749890f7b5c88321dbb133ce0f440c621ea28e76513285859b81", size = 296895 }, ] +[[package]] +name = "lazify" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/2c/b55c4a27a56dd9a00bb2812c404b57f8b7aec0cdbff9fdc61acdd73359bc/Lazify-0.4.0.tar.gz", hash = "sha256:7102bfe63e56de2ab62b3bc661a7190c4056771a8624f04a8b785275c3dd1f9b", size = 2968 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/a5/866b44697cee47d1cae429ed370281d937ad4439f71af82a6baaa139d26a/Lazify-0.4.0-py2.py3-none-any.whl", hash = "sha256:c2c17a7a33e9406897e3f66fde4cd3f84716218d580330e5af10cfe5a0cd195a", size = 3107 }, +] + [[package]] name = "linkify-it-py" version = "2.0.3" @@ -1967,6 +2053,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820 }, ] +[[package]] +name = "literalai" +version = "0.0.623" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "chevron" }, + { name = "httpx" }, + { name = "packaging" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/af/07d943e62a1297a7b44777297c0dca8f4bfcd6ae18b9df7d3cd9c1970e29/literalai-0.0.623.tar.gz", hash = "sha256:d65c04dde6b1e99d585e4112a607e5fd574d282b70f600c55a671018340dfb0f", size = 57081 } + [[package]] name = "llama-cloud" version = "0.1.7" @@ -3128,6 +3226,93 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fb/1f/737dcdbc9fea2fa96c1b392ae47275165a7c641663fbb08a8d252968eed2/opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7", 
size = 63970 }, ] +[[package]] +name = "opentelemetry-exporter-otlp" +version = "1.27.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-exporter-otlp-proto-grpc" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/d3/8156cc14e8f4573a3572ee7f30badc7aabd02961a09acc72ab5f2c789ef1/opentelemetry_exporter_otlp-1.27.0.tar.gz", hash = "sha256:4a599459e623868cc95d933c301199c2367e530f089750e115599fccd67cb2a1", size = 6166 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/6d/95e1fc2c8d945a734db32e87a5aa7a804f847c1657a21351df9338bd1c9c/opentelemetry_exporter_otlp-1.27.0-py3-none-any.whl", hash = "sha256:7688791cbdd951d71eb6445951d1cfbb7b6b2d7ee5948fac805d404802931145", size = 7001 }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.27.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cd/2e/7eaf4ba595fb5213cf639c9158dfb64aacb2e4c7d74bfa664af89fa111f4/opentelemetry_exporter_otlp_proto_common-1.27.0.tar.gz", hash = "sha256:159d27cf49f359e3798c4c3eb8da6ef4020e292571bd8c5604a2a573231dd5c8", size = 17860 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/27/4610ab3d9bb3cde4309b6505f98b3aabca04a26aa480aa18cede23149837/opentelemetry_exporter_otlp_proto_common-1.27.0-py3-none-any.whl", hash = "sha256:675db7fffcb60946f3a5c43e17d1168a3307a94a930ecf8d2ea1f286f3d4f79a", size = 17848 }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.27.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/a1/d0/c1e375b292df26e0ffebf194e82cd197e4c26cc298582bda626ce3ce74c5/opentelemetry_exporter_otlp_proto_grpc-1.27.0.tar.gz", hash = "sha256:af6f72f76bcf425dfb5ad11c1a6d6eca2863b91e63575f89bb7b4b55099d968f", size = 26244 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/80/32217460c2c64c0568cea38410124ff680a9b65f6732867bbf857c4d8626/opentelemetry_exporter_otlp_proto_grpc-1.27.0-py3-none-any.whl", hash = "sha256:56b5bbd5d61aab05e300d9d62a6b3c134827bbd28d0b12f2649c2da368006c9e", size = 18541 }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.27.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/0a/f05c55e8913bf58a033583f2580a0ec31a5f4cf2beacc9e286dcb74d6979/opentelemetry_exporter_otlp_proto_http-1.27.0.tar.gz", hash = "sha256:2103479092d8eb18f61f3fbff084f67cc7f2d4a7d37e75304b8b56c1d09ebef5", size = 15059 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/8d/4755884afc0b1db6000527cac0ca17273063b6142c773ce4ecd307a82e72/opentelemetry_exporter_otlp_proto_http-1.27.0-py3-none-any.whl", hash = "sha256:688027575c9da42e179a69fe17e2d1eba9b14d81de8d13553a21d3114f3b4d75", size = 17203 }, +] + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.48b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "setuptools" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/0e/d9394839af5d55c8feb3b22cd11138b953b49739b20678ca96289e30f904/opentelemetry_instrumentation-0.48b0.tar.gz", hash = 
"sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35", size = 24724 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7f/405c41d4f359121376c9d5117dcf68149b8122d3f6c718996d037bd4d800/opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44", size = 29449 }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.27.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/59/959f0beea798ae0ee9c979b90f220736fbec924eedbefc60ca581232e659/opentelemetry_proto-1.27.0.tar.gz", hash = "sha256:33c9345d91dafd8a74fc3d7576c5a38f18b7fdf8d02983ac67485386132aedd6", size = 34749 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/56/3d2d826834209b19a5141eed717f7922150224d1a982385d19a9444cbf8d/opentelemetry_proto-1.27.0-py3-none-any.whl", hash = "sha256:b133873de5581a50063e1e4b29cdcf0c5e253a8c2d8dc1229add20a4c3830ace", size = 52464 }, +] + [[package]] name = "opentelemetry-sdk" version = "1.27.0" @@ -3207,11 +3392,11 @@ wheels = [ [[package]] name = "packaging" -version = "24.1" +version = "23.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", size = 148788 } +sdist = { url = "https://files.pythonhosted.org/packages/fb/2b/9b9c33ffed44ee921d0967086d653047286054117d584f1b1a7c22ceaf7b/packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", size = 146714 } wheels = [ - { url = "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl", hash = 
"sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", size = 53985 }, + { url = "https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7", size = 53011 }, ] [[package]] @@ -3872,6 +4057,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 }, ] +[[package]] +name = "python-engineio" +version = "4.11.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "simple-websocket" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/52/e0/a9e0fe427ce7f1b7dbf9531fa00ffe4b557c4a7bc8e71891c115af123170/python_engineio-4.11.2.tar.gz", hash = "sha256:145bb0daceb904b4bb2d3eb2d93f7dbb7bb87a6a0c4f20a94cc8654dec977129", size = 91381 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/8f/978a0b913e3f8ad33a9a2fe204d32efe3d1ee34ecb1f2829c1cfbdd92082/python_engineio-4.11.2-py3-none-any.whl", hash = "sha256:f0971ac4c65accc489154fe12efd88f53ca8caf04754c46a66e85f5102ef22ad", size = 59239 }, +] + +[[package]] +name = "python-multipart" +version = "0.0.18" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b4/86/b6b38677dec2e2e7898fc5b6f7e42c2d011919a92d25339451892f27b89c/python_multipart-0.0.18.tar.gz", hash = "sha256:7a68db60c8bfb82e460637fa4750727b45af1d5e2ed215593f917f64694d34fe", size = 36622 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/6b/b60f47101ba2cac66b4a83246630e68ae9bbe2e614cbae5f4465f46dee13/python_multipart-0.0.18-py3-none-any.whl", hash = "sha256:efe91480f485f6a361427a541db4796f9e1591afc0fb8e7a4ba06bfbc6708996", size = 24389 }, +] + [[package]] name = 
"python-pptx" version = "1.0.2" @@ -3899,6 +4105,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a4/62/02da182e544a51a5c3ccf4b03ab79df279f9c60c5e82d5e8bec7ca26ac11/python_slugify-8.0.4-py2.py3-none-any.whl", hash = "sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8", size = 10051 }, ] +[[package]] +name = "python-socketio" +version = "5.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bidict" }, + { name = "python-engineio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/d0/40ed38076e8aee94785d546d3e3a1cae393da5806a8530be877187e2875f/python_socketio-5.12.1.tar.gz", hash = "sha256:0299ff1f470b676c09c1bfab1dead25405077d227b2c13cf217a34dadc68ba9c", size = 119991 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/a3/c69806f30dd81df5a99d592e7db4c930c3a9b098555aa97b0eb866b20b11/python_socketio-5.12.1-py3-none-any.whl", hash = "sha256:24a0ea7cfff0e021eb28c68edbf7914ee4111bdf030b95e4d250c4dc9af7a386", size = 76947 }, +] + [[package]] name = "pytz" version = "2024.2" @@ -4290,6 +4509,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755 }, ] +[[package]] +name = "simple-websocket" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wsproto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b0/d4/bfa032f961103eba93de583b161f0e6a5b63cebb8f2c7d0c6e6efe1e3d2e/simple_websocket-1.1.0.tar.gz", hash = "sha256:7939234e7aa067c534abdab3a9ed933ec9ce4691b0713c78acb195560aa52ae4", size = 17300 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/59/0782e51887ac6b07ffd1570e0364cf901ebc36345fea669969d2084baebb/simple_websocket-1.1.0-py3-none-any.whl", hash = 
"sha256:4af6069630a38ed6c561010f0e11a5bc0d4ca569b36306eb257cd9a192497c8c", size = 13842 }, +] + [[package]] name = "six" version = "1.16.0" @@ -4576,14 +4807,14 @@ wheels = [ [[package]] name = "starlette" -version = "0.41.0" +version = "0.41.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/78/53/c3a36690a923706e7ac841f649c64f5108889ab1ec44218dac45771f252a/starlette-0.41.0.tar.gz", hash = "sha256:39cbd8768b107d68bfe1ff1672b38a2c38b49777de46d2a592841d58e3bf7c2a", size = 2573755 } +sdist = { url = "https://files.pythonhosted.org/packages/1a/4c/9b5764bd22eec91c4039ef4c55334e9187085da2d8a2df7bd570869aae18/starlette-0.41.3.tar.gz", hash = "sha256:0e4ab3d16522a255be6b28260b938eae2482f98ce5cc934cb08dce8dc3ba5835", size = 2574159 } wheels = [ - { url = "https://files.pythonhosted.org/packages/35/c6/a4443bfabf5629129512ca0e07866c4c3c094079ba4e9b2551006927253c/starlette-0.41.0-py3-none-any.whl", hash = "sha256:a0193a3c413ebc9c78bff1c3546a45bb8c8bcb4a84cae8747d650a65bd37210a", size = 73216 }, + { url = "https://files.pythonhosted.org/packages/96/00/2b325970b3060c7cecebab6d295afe763365822b1306a12eeab198f74323/starlette-0.41.3-py3-none-any.whl", hash = "sha256:44cedb2b7c77a9de33a8b74b2b90e9f50d11fcf25d8270ea525ad71a25374ff7", size = 73225 }, ] [[package]] @@ -4607,6 +4838,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b2/fe/81695a1aa331a842b582453b605175f419fe8540355886031328089d840a/sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8", size = 6189177 }, ] +[[package]] +name = "syncer" +version = "2.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/dd/d4dd75843692690d81f0a4b929212a1614b25d4896aa7c72f4c3546c7e3d/syncer-2.0.3.tar.gz", hash = "sha256:4340eb54b54368724a78c5c0763824470201804fe9180129daf3635cb500550f", size = 11512 } + 
[[package]] name = "tabulate" version = "0.9.0" @@ -5038,6 +5275,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229 }, ] +[[package]] +name = "uptrace" +version = "1.27.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/89/ba1df9328e4bd4b440ac6979e20ec8c63a26f6400598e806cc9dfef764f4/uptrace-1.27.0.tar.gz", hash = "sha256:983f783b2f4303d1d2bdfaf6ace1b7a5f072af47f78a7815f82c51fcf5099cac", size = 7633 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/00/054ac30e9e8312c3c79371c495dd570865eab2a05bfcd640f6242d460c8b/uptrace-1.27.0-py3-none-any.whl", hash = "sha256:d5473efa33c34e3d5738d32d19301dbf004d4e19598c658f2fa9f3f09458f630", size = 8627 }, +] + [[package]] name = "urllib3" version = "2.2.3" @@ -5054,69 +5306,40 @@ socks = [ [[package]] name = "uvicorn" -version = "0.32.0" +version = "0.25.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "h11" }, { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e0/fc/1d785078eefd6945f3e5bab5c076e4230698046231eb0f3747bc5c8fa992/uvicorn-0.32.0.tar.gz", hash = "sha256:f78b36b143c16f54ccdb8190d0a26b5f1901fe5a3c777e1ab29f26391af8551e", size = 77564 } +sdist = { url = "https://files.pythonhosted.org/packages/ec/54/0eb4441bf38c70f6ed1886dddb2e29d1650026041d19e49fc373e332fa60/uvicorn-0.25.0.tar.gz", hash = "sha256:6dddbad1d7ee0f5140aba5ec138ddc9612c5109399903828b4874c9937f009c2", size = 40724 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/eb/14/78bd0e95dd2444b6caacbca2b730671d4295ccb628ef58b81bee903629df/uvicorn-0.32.0-py3-none-any.whl", hash = "sha256:60b8f3a5ac027dcd31448f411ced12b5ef452c646f76f02f8cc3f25d8d26fd82", size = 63723 }, + { url = "https://files.pythonhosted.org/packages/26/59/fddd9df489fe27f492cc97626e03663fb3b9b6ef7ce8597a7cdc5f2cbbad/uvicorn-0.25.0-py3-none-any.whl", hash = "sha256:ce107f5d9bd02b4636001a77a4e74aab5e1e2b146868ebbad565237145af444c", size = 60303 }, ] [[package]] name = "watchfiles" -version = "1.0.3" +version = "0.20.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3c/7e/4569184ea04b501840771b8fcecee19b2233a8b72c196061263c0ef23c0b/watchfiles-1.0.3.tar.gz", hash = "sha256:f3ff7da165c99a5412fe5dd2304dd2dbaaaa5da718aad942dcb3a178eaa70c56", size = 38185 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cd/6c/7be04641c81209ea281b83b1174aa9d5ba53bec2a896d75a6b10428b4063/watchfiles-1.0.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:1da46bb1eefb5a37a8fb6fd52ad5d14822d67c498d99bda8754222396164ae42", size = 395213 }, - { url = "https://files.pythonhosted.org/packages/bd/d6/99438baa225891bda882adefefc14c9023ef3cdaf9772cd47973bb566e96/watchfiles-1.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2b961b86cd3973f5822826017cad7f5a75795168cb645c3a6b30c349094e02e3", size = 384755 }, - { url = "https://files.pythonhosted.org/packages/88/93/b10295ce8696e5e37f480ba4ae89e387e88ba425d72808c87d30f4cdefb1/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34e87c7b3464d02af87f1059fedda5484e43b153ef519e4085fe1a03dd94801e", size = 441701 }, - { url = "https://files.pythonhosted.org/packages/c5/3a/0359b7bddb1b7cbe6fb7096805b6e2f859f0de3d6130dcab9ac635db87e2/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:d9dd2b89a16cf7ab9c1170b5863e68de6bf83db51544875b25a5f05a7269e678", size = 447540 }, - { url = "https://files.pythonhosted.org/packages/e2/a7/3400b4f105c68804495b76398165ffe6c00af93eab395279285f43cd0e42/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b4691234d31686dca133c920f94e478b548a8e7c750f28dbbc2e4333e0d3da9", size = 472467 }, - { url = "https://files.pythonhosted.org/packages/c3/1a/8f928800d038d4fdb1e9df6e0c380c8cee17e6fb180e1faceb3f94de6df7/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90b0fe1fcea9bd6e3084b44875e179b4adcc4057a3b81402658d0eb58c98edf8", size = 494467 }, - { url = "https://files.pythonhosted.org/packages/13/70/af75edf5b763f09e31a0f19ce045f3731db22599cb521807760b7d82b196/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b90651b4cf9e158d01faa0833b073e2e37719264bcee3eac49fc3c74e7d304b", size = 492671 }, - { url = "https://files.pythonhosted.org/packages/4a/6e/8723f4b0967cc8d94f33fc531c33d66b596090b024f449983d3a8d97cfca/watchfiles-1.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2e9fe695ff151b42ab06501820f40d01310fbd58ba24da8923ace79cf6d702d", size = 443811 }, - { url = "https://files.pythonhosted.org/packages/ee/5d/f3ca68a71d978d43168a65a1b4e1f72290c5350379aa148917e4ed0b2c46/watchfiles-1.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62691f1c0894b001c7cde1195c03b7801aaa794a837bd6eef24da87d1542838d", size = 615477 }, - { url = "https://files.pythonhosted.org/packages/0d/d0/3d27a26f276ef07ca4cd3c6766684444317ddd147943e00bdb157cfdf3c3/watchfiles-1.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:275c1b0e942d335fccb6014d79267d1b9fa45b5ac0639c297f1e856f2f532552", size = 614237 }, - { url = "https://files.pythonhosted.org/packages/97/e9/ff30b210099d75cfa407924b3c265d3054f14b83ddf02072bd637394aab6/watchfiles-1.0.3-cp310-cp310-win32.whl", hash = 
"sha256:06ce08549e49ba69ccc36fc5659a3d0ff4e3a07d542b895b8a9013fcab46c2dc", size = 270798 }, - { url = "https://files.pythonhosted.org/packages/ed/86/694f07eb91d3e81a359661b48ff6984543e50be767c50c08196155d417bf/watchfiles-1.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:f280b02827adc9d87f764972fbeb701cf5611f80b619c20568e1982a277d6146", size = 284192 }, - { url = "https://files.pythonhosted.org/packages/24/a8/06e2d5f840b285718a09be7c71ea19b7177b005cec87b8923dd7e8541b20/watchfiles-1.0.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ffe709b1d0bc2e9921257569675674cafb3a5f8af689ab9f3f2b3f88775b960f", size = 394821 }, - { url = "https://files.pythonhosted.org/packages/57/9f/f98a57ada3d4b1fcd0e325aa6c307e2248ecb048f71c96fba34a602f02e7/watchfiles-1.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:418c5ce332f74939ff60691e5293e27c206c8164ce2b8ce0d9abf013003fb7fe", size = 384898 }, - { url = "https://files.pythonhosted.org/packages/a3/31/33ba914010cbfd01033ca3727aff6585b6b2ea2b051b6fbaecdf4e2160b9/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f492d2907263d6d0d52f897a68647195bc093dafed14508a8d6817973586b6b", size = 441710 }, - { url = "https://files.pythonhosted.org/packages/d9/dd/e56b2ef07c2c34e4152950f0ce98a1081215ef027cf39e5dab61a0f8bd95/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48c9f3bc90c556a854f4cab6a79c16974099ccfa3e3e150673d82d47a4bc92c9", size = 447681 }, - { url = "https://files.pythonhosted.org/packages/60/8f/3837df33f3d0cbef8ae59559891d688490bf2960373ea077ff11cbf79115/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75d3bcfa90454dba8df12adc86b13b6d85fda97d90e708efc036c2760cc6ba44", size = 472312 }, - { url = "https://files.pythonhosted.org/packages/5a/b3/95d103e5bb609b20f175e8acdf8b32c4b091f96f781c066fd3bff2b17778/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:5691340f259b8f76b45fb31b98e594d46c36d1dc8285efa7975f7f50230c9093", size = 494779 }, - { url = "https://files.pythonhosted.org/packages/4f/f0/9fdc60daf5abf7b0deb225c9b0a37fd72dc407fa33f071ae2f70e84e268c/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e263cc718545b7f897baeac1f00299ab6fabe3e18caaacacb0edf6d5f35513c", size = 492090 }, - { url = "https://files.pythonhosted.org/packages/96/e5/a9967e77f173280ab1abbfd7ead90f2b94060574968baf5e6d7cbe9dd490/watchfiles-1.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c6cf7709ed3e55704cc06f6e835bf43c03bc8e3cb8ff946bf69a2e0a78d9d77", size = 443713 }, - { url = "https://files.pythonhosted.org/packages/60/38/e5390d4633a558878113e45d32e39d30cf58eb94e0359f41737be209321b/watchfiles-1.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:703aa5e50e465be901e0e0f9d5739add15e696d8c26c53bc6fc00eb65d7b9469", size = 615306 }, - { url = "https://files.pythonhosted.org/packages/5c/27/8a1ee74544c93e3242ca073087b45c64367aeb6897b622e43c8172c2b421/watchfiles-1.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bfcae6aecd9e0cb425f5145afee871465b98b75862e038d42fe91fd753ddd780", size = 614333 }, - { url = "https://files.pythonhosted.org/packages/fc/f8/25698f5b734907662b50acf3e81996053abdfe26fcf38804d028412876a8/watchfiles-1.0.3-cp311-cp311-win32.whl", hash = "sha256:6a76494d2c5311584f22416c5a87c1e2cb954ff9b5f0988027bc4ef2a8a67181", size = 270987 }, - { url = "https://files.pythonhosted.org/packages/39/78/f600dee7b387e6088c8d1f4c898a4340d07aecfe6406bd90ec4c1925ef08/watchfiles-1.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:cf745cbfad6389c0e331786e5fe9ae3f06e9d9c2ce2432378e1267954793975c", size = 284098 }, - { url = "https://files.pythonhosted.org/packages/ca/6f/27ba8aec0a4b45a6063454465eefb42777158081d9df18eab5f1d6a3bd8a/watchfiles-1.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:2dcc3f60c445f8ce14156854a072ceb36b83807ed803d37fdea2a50e898635d6", size = 
276804 }, - { url = "https://files.pythonhosted.org/packages/bf/a9/c8b5ab33444306e1a324cb2b51644f8458dd459e30c3841f925012893e6a/watchfiles-1.0.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:93436ed550e429da007fbafb723e0769f25bae178fbb287a94cb4ccdf42d3af3", size = 391395 }, - { url = "https://files.pythonhosted.org/packages/ad/d3/403af5f07359863c03951796ddab265ee8cce1a6147510203d0bf43950e7/watchfiles-1.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c18f3502ad0737813c7dad70e3e1cc966cc147fbaeef47a09463bbffe70b0a00", size = 381432 }, - { url = "https://files.pythonhosted.org/packages/f6/5f/921f2f2beabaf24b1ad81ac22bb69df8dd5771fdb68d6f34a5912a420941/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a5bc3ca468bb58a2ef50441f953e1f77b9a61bd1b8c347c8223403dc9b4ac9a", size = 441448 }, - { url = "https://files.pythonhosted.org/packages/63/d7/67d0d750b246f248ccdb400a85a253e93e419ea5b6cbe968fa48b97a5f30/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0d1ec043f02ca04bf21b1b32cab155ce90c651aaf5540db8eb8ad7f7e645cba8", size = 446852 }, - { url = "https://files.pythonhosted.org/packages/53/7c/d7cd94c7d0905f1e2f1c2232ea9bc39b1a48affd007e09c547ead96edb8f/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f58d3bfafecf3d81c15d99fc0ecf4319e80ac712c77cf0ce2661c8cf8bf84066", size = 471662 }, - { url = "https://files.pythonhosted.org/packages/26/81/738f8e66f7525753996b8aa292f78dcec1ef77887d62e6cdfb04cc2f352f/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1df924ba82ae9e77340101c28d56cbaff2c991bd6fe8444a545d24075abb0a87", size = 493765 }, - { url = "https://files.pythonhosted.org/packages/d2/50/78e21f5da24ab39114e9b24f7b0945ea1c6fc7bc9ae86cd87f8eaeb47325/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:632a52dcaee44792d0965c17bdfe5dc0edad5b86d6a29e53d6ad4bf92dc0ff49", size = 490558 }, - { url = "https://files.pythonhosted.org/packages/a8/93/1873fea6354b2858eae8970991d64e9a449d87726d596490d46bf00af8ed/watchfiles-1.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bf4b459d94a0387617a1b499f314aa04d8a64b7a0747d15d425b8c8b151da0", size = 442808 }, - { url = "https://files.pythonhosted.org/packages/4f/b4/2fc4c92fb28b029f66d04a4d430fe929284e9ff717b04bb7a3bb8a7a5605/watchfiles-1.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca94c85911601b097d53caeeec30201736ad69a93f30d15672b967558df02885", size = 615287 }, - { url = "https://files.pythonhosted.org/packages/1e/d4/93da24db39257e440240d338b617c5153ad11d361c34108f5c0e1e0743eb/watchfiles-1.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:65ab1fb635476f6170b07e8e21db0424de94877e4b76b7feabfe11f9a5fc12b5", size = 612812 }, - { url = "https://files.pythonhosted.org/packages/c6/67/9fd3661c2dc0309abd6021876653d91e8b64fb279529e2cadaa3520ef3e3/watchfiles-1.0.3-cp312-cp312-win32.whl", hash = "sha256:49bc1bc26abf4f32e132652f4b3bfeec77d8f8f62f57652703ef127e85a3e38d", size = 271642 }, - { url = "https://files.pythonhosted.org/packages/ae/aa/8c887edb78cd67f5d4d6a35c3aeb46d748643ebf962163130fb1871e2ee0/watchfiles-1.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:48681c86f2cb08348631fed788a116c89c787fdf1e6381c5febafd782f6c3b44", size = 285505 }, - { url = "https://files.pythonhosted.org/packages/7b/31/d212fa6390f0e73a91913ada0b925b294a78d67794795371208baf73f0b5/watchfiles-1.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:9e080cf917b35b20c889225a13f290f2716748362f6071b859b60b8847a6aa43", size = 277263 }, - { url = "https://files.pythonhosted.org/packages/26/48/5a75b18ad40cc69ea6e0003bb748db162a3215bbe44a1293e073876d51bd/watchfiles-1.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:84fac88278f42d61c519a6c75fb5296fd56710b05bbdcc74bdf85db409a03780", size = 396233 }, - { 
url = "https://files.pythonhosted.org/packages/dc/b2/03ce3447a3271483b030b8bafc39be19739f9a4a23edec31c6688e8a066d/watchfiles-1.0.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:c68be72b1666d93b266714f2d4092d78dc53bd11cf91ed5a3c16527587a52e29", size = 386050 }, - { url = "https://files.pythonhosted.org/packages/ab/0c/38914f56a95aa6ec911bb7cee617762d93aaf5a11efecadbb698d6b0b9a2/watchfiles-1.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889a37e2acf43c377b5124166bece139b4c731b61492ab22e64d371cce0e6e80", size = 442404 }, - { url = "https://files.pythonhosted.org/packages/4d/8c/a95d3ba1ccfa33a43649668f699150cce1ea795e4300c33b4c3e974a444b/watchfiles-1.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca05cacf2e5c4a97d02a2878a24020daca21dbb8823b023b978210a75c79098", size = 444461 }, +sdist = { url = "https://files.pythonhosted.org/packages/ef/48/02d2d2cbf54e134810b2cb40ac79fdb8ce08476184536a4764717a7bc9f4/watchfiles-0.20.0.tar.gz", hash = "sha256:728575b6b94c90dd531514677201e8851708e6e4b5fe7028ac506a200b622019", size = 37041 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/db/899832e11fef2d468bf8b3c1c13289b1db4cb7c3410bb2a9612a52fc8b22/watchfiles-0.20.0-cp37-abi3-macosx_10_7_x86_64.whl", hash = "sha256:3796312bd3587e14926013612b23066912cf45a14af71cf2b20db1c12dadf4e9", size = 417357 }, + { url = "https://files.pythonhosted.org/packages/9f/1a/85c914e4db62a3f8197daa98a271ea380a5d200a8d3058bd9f417752bc26/watchfiles-0.20.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:d0002d81c89a662b595645fb684a371b98ff90a9c7d8f8630c82f0fde8310458", size = 407258 }, + { url = "https://files.pythonhosted.org/packages/25/ae/b7bddad421af5e33079a2ce639aa58837b715a2da98df16e25ecd310af52/watchfiles-0.20.0-cp37-abi3-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:570848706440373b4cd8017f3e850ae17f76dbdf1e9045fc79023b11e1afe490", size = 1331327 }, + { url = 
"https://files.pythonhosted.org/packages/21/e5/b080cec4e841b1cf338ccbd958cf3232ad1691a590653b2d124b5c79cf6b/watchfiles-0.20.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a0351d20d03c6f7ad6b2e8a226a5efafb924c7755ee1e34f04c77c3682417fa", size = 1301371 }, + { url = "https://files.pythonhosted.org/packages/05/a0/2fb2c36730995a6b3f060187195dc08ad9ceee67426bdca8a4296024071c/watchfiles-0.20.0-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:007dcc4a401093010b389c044e81172c8a2520dba257c88f8828b3d460c6bb38", size = 1302438 }, + { url = "https://files.pythonhosted.org/packages/13/ea/d11971958ae703cfe443b21f672169cb8bc12dbec5781b910633fa2186ec/watchfiles-0.20.0-cp37-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0d82dbc1832da83e441d112069833eedd4cf583d983fb8dd666fbefbea9d99c0", size = 1410655 }, + { url = "https://files.pythonhosted.org/packages/6b/81/3f922f3ede53ca9c0b4095f63688ffeea19a49592d0ac62db1eb9632b1e3/watchfiles-0.20.0-cp37-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99f4c65fd2fce61a571b2a6fcf747d6868db0bef8a934e8ca235cc8533944d95", size = 1494222 }, + { url = "https://files.pythonhosted.org/packages/e1/46/c9d5ee4871b187d291d62e61c41f9a4d67d4866a89704b0ad16b6949e9bd/watchfiles-0.20.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5392dd327a05f538c56edb1c6ebba6af91afc81b40822452342f6da54907bbdf", size = 1294171 }, + { url = "https://files.pythonhosted.org/packages/59/5e/6b64e3bf9fd4422250f3c716d992dd76dbe55e6fa1e7ebaf2bf88f389707/watchfiles-0.20.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:08dc702529bb06a2b23859110c214db245455532da5eaea602921687cfcd23db", size = 1462256 }, + { url = "https://files.pythonhosted.org/packages/11/c0/75f5a71ac24118ab11bd898e0114cedc72b25924ff2d960d473bddb4ec6e/watchfiles-0.20.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:7d4e66a857621584869cfbad87039e65dadd7119f0d9bb9dbc957e089e32c164", 
size = 1461725 }, + { url = "https://files.pythonhosted.org/packages/91/d4/0c0fdcc4293ad1b73db54896fa0de4b37439ae4f25971b5eb1708dd04f9a/watchfiles-0.20.0-cp37-abi3-win32.whl", hash = "sha256:a03d1e6feb7966b417f43c3e3783188167fd69c2063e86bad31e62c4ea794cc5", size = 268193 }, + { url = "https://files.pythonhosted.org/packages/87/79/098b1b1fcb6de16149d23283a2ab5dadce6a06b864e7a182d231f57a1f9e/watchfiles-0.20.0-cp37-abi3-win_amd64.whl", hash = "sha256:eccc8942bcdc7d638a01435d915b913255bbd66f018f1af051cd8afddb339ea3", size = 276723 }, + { url = "https://files.pythonhosted.org/packages/3f/82/45dddf4f5bf8b73ba27382cebb2bb3c0ee922c7ef77d936b86276aa39dca/watchfiles-0.20.0-cp37-abi3-win_arm64.whl", hash = "sha256:b17d4176c49d207865630da5b59a91779468dd3e08692fe943064da260de2c7c", size = 265344 }, ] [[package]] From 52c2a70e95df2006e0094e96ad192243148ec4bb Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 7 Jan 2025 16:06:14 -0800 Subject: [PATCH 16/61] Fix chess sample (#4932) --------- Co-authored-by: Jack Gerrits --- python/samples/core_chess_game/main.py | 53 +++++++------ python/samples/core_chess_game/utils.py | 98 ------------------------- 2 files changed, 30 insertions(+), 121 deletions(-) delete mode 100644 python/samples/core_chess_game/utils.py diff --git a/python/samples/core_chess_game/main.py b/python/samples/core_chess_game/main.py index 78b4787750e0..ccc77deba0f2 100644 --- a/python/samples/core_chess_game/main.py +++ b/python/samples/core_chess_game/main.py @@ -1,6 +1,7 @@ """This is an example of simulating a chess game with two agents that play against each other, using tools to reason about the game state -and make moves, and using a group chat manager to orchestrate the conversation.""" +and make moves. 
The agents subscribe to the default topic and publish their +moves to the default topic.""" import argparse import asyncio @@ -19,7 +20,12 @@ message_handler, ) from autogen_core.model_context import BufferedChatCompletionContext, ChatCompletionContext -from autogen_core.models import AssistantMessage, ChatCompletionClient, LLMMessage, SystemMessage, UserMessage +from autogen_core.models import ( + ChatCompletionClient, + LLMMessage, + SystemMessage, + UserMessage, +) from autogen_core.tool_agent import ToolAgent, tool_agent_caller_loop from autogen_core.tools import FunctionTool, Tool, ToolSchema from chess import BLACK, SQUARE_NAMES, WHITE, Board, Move @@ -33,7 +39,7 @@ class TextMessage(BaseModel): @default_subscription -class ToolUseAgent(RoutedAgent): +class PlayerAgent(RoutedAgent): def __init__( self, description: str, @@ -59,14 +65,15 @@ async def handle_message(self, message: TextMessage, ctx: MessageContext) -> Non self, tool_agent_id=self._tool_agent_id, model_client=self._model_client, - input_messages=(await self._model_context.get_messages()), + input_messages=self._system_messages + (await self._model_context.get_messages()), tool_schema=self._tool_schema, cancellation_token=ctx.cancellation_token, ) - assert isinstance(messages[-1].content, str) # Add the assistant message to the model context. - await self._model_context.add_message(AssistantMessage(content=messages[-1].content, source=self.id.type)) + for msg in messages: + await self._model_context.add_message(msg) # Publish the final response. + assert isinstance(messages[-1].content, str) await self.publish_message(TextMessage(content=messages[-1].content, source=self.id.type), DefaultTopicId()) @@ -203,39 +210,39 @@ def get_board_text() -> Annotated[str, "The current board state"]: # Register the agents. 
await ToolAgent.register( runtime, - "ToolAgent", - lambda: ToolAgent(description="Tool agent for chess game.", tools=black_tools + white_tools), + "PlayerBlackToolAgent", + lambda: ToolAgent(description="Tool agent for chess game.", tools=black_tools), + ) + + await ToolAgent.register( + runtime, + "PlayerWhiteToolAgent", + lambda: ToolAgent(description="Tool agent for chess game.", tools=white_tools), ) - await ToolUseAgent.register( + await PlayerAgent.register( runtime, "PlayerBlack", - lambda: ToolUseAgent( + lambda: PlayerAgent( description="Player playing black.", - instructions="You are a chess player and you play as black. " - "Use get_legal_moves() to get list of legal moves. " - "Use get_board() to get the current board state. " - "Think about your strategy and call make_move(thinking, move) to make a move.", + instructions="You are a chess player and you play as black. Use the tool 'get_board' and 'get_legal_moves' to get the legal moves and 'make_move' to make a move.", model_client=model_client, model_context=BufferedChatCompletionContext(buffer_size=10), tool_schema=[tool.schema for tool in black_tools], - tool_agent_type="ToolAgent", + tool_agent_type="PlayerBlackToolAgent", ), ) - await ToolUseAgent.register( + await PlayerAgent.register( runtime, "PlayerWhite", - lambda: ToolUseAgent( + lambda: PlayerAgent( description="Player playing white.", - instructions="You are a chess player and you play as white. " - "Use get_legal_moves() to get list of legal moves. " - "Use get_board() to get the current board state. " - "Think about your strategy and call make_move(thinking, move) to make a move.", + instructions="You are a chess player and you play as white. 
Use the tool 'get_board' and 'get_legal_moves' to get the legal moves and 'make_move' to make a move.", model_client=model_client, model_context=BufferedChatCompletionContext(buffer_size=10), tool_schema=[tool.schema for tool in white_tools], - tool_agent_type="ToolAgent", + tool_agent_type="PlayerWhiteToolAgent", ), ) @@ -249,7 +256,7 @@ async def main(model_config: Dict[str, Any]) -> None: # orchestration. # Send an initial message to player white to start the game. await runtime.send_message( - TextMessage(content="Game started.", source="System"), + TextMessage(content="Game started, white player your move.", source="System"), AgentId("PlayerWhite", "default"), ) await runtime.stop_when_idle() diff --git a/python/samples/core_chess_game/utils.py b/python/samples/core_chess_game/utils.py deleted file mode 100644 index 5fc21dc20401..000000000000 --- a/python/samples/core_chess_game/utils.py +++ /dev/null @@ -1,98 +0,0 @@ -from typing import List, Optional, Union - -from autogen_core.models import ( - AssistantMessage, - FunctionExecutionResult, - FunctionExecutionResultMessage, - LLMMessage, - UserMessage, -) -from typing_extensions import Literal - -from .messages import ( - FunctionCallMessage, - Message, - MultiModalMessage, - TextMessage, -) - - -def convert_content_message_to_assistant_message( - message: Union[TextMessage, MultiModalMessage, FunctionCallMessage], - handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error", -) -> Optional[AssistantMessage]: - match message: - case TextMessage() | FunctionCallMessage(): - return AssistantMessage(content=message.content, source=message.source) - case MultiModalMessage(): - if handle_unrepresentable == "error": - raise ValueError("Cannot represent multimodal message as AssistantMessage") - elif handle_unrepresentable == "ignore": - return None - elif handle_unrepresentable == "try_slice": - return AssistantMessage( - content="".join([x for x in message.content if isinstance(x, str)]), - 
source=message.source, - ) - - -def convert_content_message_to_user_message( - message: Union[TextMessage, MultiModalMessage, FunctionCallMessage], - handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error", -) -> Optional[UserMessage]: - match message: - case TextMessage() | MultiModalMessage(): - return UserMessage(content=message.content, source=message.source) - case FunctionCallMessage(): - if handle_unrepresentable == "error": - raise ValueError("Cannot represent multimodal message as UserMessage") - elif handle_unrepresentable == "ignore": - return None - elif handle_unrepresentable == "try_slice": - # TODO: what is a sliced function call? - raise NotImplementedError("Sliced function calls not yet implemented") - - -def convert_tool_call_response_message( - message: FunctionExecutionResultMessage, - handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error", -) -> Optional[FunctionExecutionResultMessage]: - match message: - case FunctionExecutionResultMessage(): - return FunctionExecutionResultMessage( - content=[FunctionExecutionResult(content=x.content, call_id=x.call_id) for x in message.content] - ) - - -def convert_messages_to_llm_messages( - messages: List[Message], - self_name: str, - handle_unrepresentable: Literal["error", "ignore", "try_slice"] = "error", -) -> List[LLMMessage]: - result: List[LLMMessage] = [] - for message in messages: - match message: - case ( - TextMessage(content=_, source=source) - | MultiModalMessage(content=_, source=source) - | FunctionCallMessage(content=_, source=source) - ) if source == self_name: - converted_message_1 = convert_content_message_to_assistant_message(message, handle_unrepresentable) - if converted_message_1 is not None: - result.append(converted_message_1) - case ( - TextMessage(content=_, source=source) - | MultiModalMessage(content=_, source=source) - | FunctionCallMessage(content=_, source=source) - ) if source != self_name: - converted_message_2 = 
convert_content_message_to_user_message(message, handle_unrepresentable) - if converted_message_2 is not None: - result.append(converted_message_2) - case FunctionExecutionResultMessage(content=_): - converted_message_3 = convert_tool_call_response_message(message, handle_unrepresentable) - if converted_message_3 is not None: - result.append(converted_message_3) - case _: - raise AssertionError("unreachable") - - return result From 00b06ab2e18d189dc145cdb43fef18c16dfae5a6 Mon Sep 17 00:00:00 2001 From: Roy Belio <34023431+r-bit-rry@users.noreply.github.com> Date: Wed, 8 Jan 2025 04:18:42 +0200 Subject: [PATCH 17/61] fix(magentic-one): Enhance error handling in orchestrate_step to manage invalid ledger formats (#4845) * fix(magentic-one): Enhance error handling in orchestrate_step to manage invalid ledger formats * formatting --------- Co-authored-by: Roy Belio Co-authored-by: Jack Gerrits Co-authored-by: Jack Gerrits --- .../_group_chat/_magentic_one/_magentic_one_orchestrator.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index d405bab5b13e..8d3b64ff3afa 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -297,6 +297,7 @@ async def _orchestrate_step(self, cancellation_token: CancellationToken) -> None for key in required_keys: if ( key not in progress_ledger + or not isinstance(progress_ledger[key], dict) or "answer" not in progress_ledger[key] or "reason" not in progress_ledger[key] ): @@ -305,7 +306,9 @@ async def _orchestrate_step(self, cancellation_token: CancellationToken) -> None if not key_error: 
break await self._log_message(f"Failed to parse ledger information, retrying: {ledger_str}") - except json.JSONDecodeError: + except (json.JSONDecodeError, TypeError): + key_error = True + await self._log_message("Invalid ledger format encountered, retrying...") continue if key_error: raise ValueError("Failed to parse ledger information after multiple retries.") From 973c8b63302df5b13ddbb83628d7cabd09a960fa Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Wed, 8 Jan 2025 08:56:08 -0500 Subject: [PATCH 18/61] Remove deprecated items for release (#4927) --- .../src/autogen_agentchat/agents/__init__.py | 7 +- .../agents/_assistant_agent.py | 13 --- .../agents/_coding_assistant_agent.py | 40 --------- .../agents/_tool_use_assistant_agent.py | 46 ---------- .../src/autogen_agentchat/messages.py | 38 +------- .../src/autogen_agentchat/task/__init__.py | 88 ------------------- .../src/autogen_core/base/__init__.py | 0 .../src/autogen_core/base/intervention.py | 22 ----- 8 files changed, 2 insertions(+), 252 deletions(-) delete mode 100644 python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py delete mode 100644 python/packages/autogen-agentchat/src/autogen_agentchat/agents/_tool_use_assistant_agent.py delete mode 100644 python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py delete mode 100644 python/packages/autogen-core/src/autogen_core/base/__init__.py delete mode 100644 python/packages/autogen-core/src/autogen_core/base/intervention.py diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py index 855e66ae866f..a6732d12ef3f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py @@ -3,21 +3,16 @@ BaseChatAgent is the base class for all agents in AgentChat. 
""" -from ._assistant_agent import AssistantAgent, Handoff # type: ignore +from ._assistant_agent import AssistantAgent from ._base_chat_agent import BaseChatAgent from ._code_executor_agent import CodeExecutorAgent -from ._coding_assistant_agent import CodingAssistantAgent from ._society_of_mind_agent import SocietyOfMindAgent -from ._tool_use_assistant_agent import ToolUseAssistantAgent from ._user_proxy_agent import UserProxyAgent __all__ = [ "BaseChatAgent", "AssistantAgent", - "Handoff", "CodeExecutorAgent", - "CodingAssistantAgent", - "ToolUseAssistantAgent", "SocietyOfMindAgent", "UserProxyAgent", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 95f8e8dd8dd5..f60a44dbb90c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -27,7 +27,6 @@ UserMessage, ) from autogen_core.tools import FunctionTool, Tool -from typing_extensions import deprecated from .. import EVENT_LOGGER_NAME from ..base import Handoff as HandoffBase @@ -48,18 +47,6 @@ event_logger = logging.getLogger(EVENT_LOGGER_NAME) -@deprecated("Moved to autogen_agentchat.base.Handoff. Will remove in 0.4.0.", stacklevel=2) -class Handoff(HandoffBase): - """[DEPRECATED] Handoff configuration. Moved to :class:`autogen_agentchat.base.Handoff`. Will remove in 0.4.0.""" - - def model_post_init(self, __context: Any) -> None: - warnings.warn( - "Handoff was moved to autogen_agentchat.base.Handoff. Importing from this will be removed in 0.4.0.", - DeprecationWarning, - stacklevel=2, - ) - - class AssistantAgent(BaseChatAgent): """An agent that provides assistance with tool use. 
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py deleted file mode 100644 index 751b0d4404fb..000000000000 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py +++ /dev/null @@ -1,40 +0,0 @@ -import warnings - -from autogen_core.models import ( - ChatCompletionClient, -) - -from ._assistant_agent import AssistantAgent - - -class CodingAssistantAgent(AssistantAgent): - """[DEPRECATED] An agent that provides coding assistance using an LLM model client. - - It responds with a StopMessage when 'terminate' is detected in the response. - """ - - def __init__( - self, - name: str, - model_client: ChatCompletionClient, - *, - description: str = "A helpful and general-purpose AI assistant that has strong language skills, Python skills, and Linux command line skills.", - system_message: str = """You are a helpful AI assistant. -Solve tasks using your coding and language skills. -In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. - 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself. - 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. -Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. -When using code, you must indicate the script type in the code block. 
The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user. -If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user. -If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. -When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible. -Reply "TERMINATE" in the end when code has been executed and task is complete.""", - ): - # Deprecation warning. - warnings.warn( - "CodingAssistantAgent is deprecated. 
Use AssistantAgent instead.", - DeprecationWarning, - stacklevel=2, - ) - super().__init__(name, model_client, description=description, system_message=system_message) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_tool_use_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_tool_use_assistant_agent.py deleted file mode 100644 index 1ebe1f22662b..000000000000 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_tool_use_assistant_agent.py +++ /dev/null @@ -1,46 +0,0 @@ -import logging -import warnings -from typing import Any, Awaitable, Callable, List - -from autogen_core.models import ( - ChatCompletionClient, -) -from autogen_core.tools import Tool - -from .. import EVENT_LOGGER_NAME -from ._assistant_agent import AssistantAgent - -event_logger = logging.getLogger(EVENT_LOGGER_NAME) - - -class ToolUseAssistantAgent(AssistantAgent): - """[DEPRECATED] An agent that provides assistance with tool use. - - It responds with a StopMessage when 'terminate' is detected in the response. - - Args: - name (str): The name of the agent. - model_client (ChatCompletionClient): The model client to use for inference. - registered_tools (List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]): The tools to register with the agent. - description (str, optional): The description of the agent. - system_message (str, optional): The system message for the model. - """ - - def __init__( - self, - name: str, - model_client: ChatCompletionClient, - registered_tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]], - *, - description: str = "An agent that provides assistance with ability to use tools.", - system_message: str = "You are a helpful AI assistant. Solve tasks using your tools. Reply with 'TERMINATE' when the task has been completed.", - ): - # Deprecation warning. - warnings.warn( - "ToolUseAssistantAgent is deprecated. 
Use AssistantAgent instead.", - DeprecationWarning, - stacklevel=2, - ) - super().__init__( - name, model_client, tools=registered_tools, description=description, system_message=system_message - ) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 923b569602e0..07fc3123eb4c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -10,7 +10,7 @@ class and includes specific fields relevant to the type of message being sent. from autogen_core import FunctionCall, Image from autogen_core.models import FunctionExecutionResult, RequestUsage from pydantic import BaseModel, ConfigDict, Field -from typing_extensions import Annotated, deprecated +from typing_extensions import Annotated class BaseMessage(BaseModel, ABC): @@ -76,26 +76,6 @@ class HandoffMessage(BaseChatMessage): type: Literal["HandoffMessage"] = "HandoffMessage" -@deprecated("Will be removed in 0.4.0, use ToolCallRequestEvent instead.") -class ToolCallMessage(BaseMessage): - """A message signaling the use of tools.""" - - content: List[FunctionCall] - """The tool calls.""" - - type: Literal["ToolCallMessage"] = "ToolCallMessage" - - -@deprecated("Will be removed in 0.4.0, use ToolCallExecutionEvent instead.") -class ToolCallResultMessage(BaseMessage): - """A message signaling the results of tool calls.""" - - content: List[FunctionExecutionResult] - """The tool call results.""" - - type: Literal["ToolCallResultMessage"] = "ToolCallResultMessage" - - class ToolCallRequestEvent(BaseAgentEvent): """An event signaling a request to use tools.""" @@ -133,19 +113,6 @@ class ToolCallSummaryMessage(BaseChatMessage): """Events emitted by agents and teams when they work, not used for agent-to-agent communication.""" -AgentMessage = Annotated[ - TextMessage - | MultiModalMessage - | StopMessage - | HandoffMessage - | 
ToolCallRequestEvent - | ToolCallExecutionEvent - | ToolCallSummaryMessage, - Field(discriminator="type"), -] -"""(Deprecated, will be removed in 0.4.0) All message and event types.""" - - __all__ = [ "BaseMessage", "TextMessage", @@ -154,10 +121,7 @@ class ToolCallSummaryMessage(BaseChatMessage): "HandoffMessage", "ToolCallRequestEvent", "ToolCallExecutionEvent", - "ToolCallMessage", - "ToolCallResultMessage", "ToolCallSummaryMessage", "ChatMessage", "AgentEvent", - "AgentMessage", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py deleted file mode 100644 index a45983d83c3a..000000000000 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -from typing import AsyncGenerator, TypeVar - -from typing_extensions import deprecated - -from ..base import Response, TaskResult -from ..conditions import ( - ExternalTermination as ExternalTerminationAlias, -) -from ..conditions import ( - HandoffTermination as HandoffTerminationAlias, -) -from ..conditions import ( - MaxMessageTermination as MaxMessageTerminationAlias, -) -from ..conditions import ( - SourceMatchTermination as SourceMatchTerminationAlias, -) -from ..conditions import ( - StopMessageTermination as StopMessageTerminationAlias, -) -from ..conditions import ( - TextMentionTermination as TextMentionTerminationAlias, -) -from ..conditions import ( - TimeoutTermination as TimeoutTerminationAlias, -) -from ..conditions import ( - TokenUsageTermination as TokenUsageTerminationAlias, -) -from ..messages import AgentEvent, ChatMessage -from ..ui import Console as ConsoleAlias - - -@deprecated("Moved to autogen_agentchat.conditions.ExternalTermination. Will remove this in 0.4.0.", stacklevel=2) -class ExternalTermination(ExternalTerminationAlias): ... - - -@deprecated("Moved to autogen_agentchat.conditions.HandoffTermination. 
Will remove this in 0.4.0.", stacklevel=2) -class HandoffTermination(HandoffTerminationAlias): ... - - -@deprecated("Moved to autogen_agentchat.conditions.MaxMessageTermination. Will remove this in 0.4.0.", stacklevel=2) -class MaxMessageTermination(MaxMessageTerminationAlias): ... - - -@deprecated("Moved to autogen_agentchat.conditions.SourceMatchTermination. Will remove this in 0.4.0.", stacklevel=2) -class SourceMatchTermination(SourceMatchTerminationAlias): ... - - -@deprecated("Moved to autogen_agentchat.conditions.StopMessageTermination. Will remove this in 0.4.0.", stacklevel=2) -class StopMessageTermination(StopMessageTerminationAlias): ... - - -@deprecated("Moved to autogen_agentchat.conditions.TextMentionTermination. Will remove this in 0.4.0.", stacklevel=2) -class TextMentionTermination(TextMentionTerminationAlias): ... - - -@deprecated("Moved to autogen_agentchat.conditions.TimeoutTermination. Will remove this in 0.4.0.", stacklevel=2) -class TimeoutTermination(TimeoutTerminationAlias): ... - - -@deprecated("Moved to autogen_agentchat.conditions.TokenUsageTermination. Will remove this in 0.4.0.", stacklevel=2) -class TokenUsageTermination(TokenUsageTerminationAlias): ... - - -T = TypeVar("T", bound=TaskResult | Response) - - -@deprecated("Moved to autogen_agentchat.ui.Console. 
Will remove this in 0.4.0.", stacklevel=2) -async def Console( - stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], - *, - no_inline_images: bool = False, -) -> T: - return await ConsoleAlias(stream, no_inline_images=no_inline_images) - - -__all__ = [ - "MaxMessageTermination", - "TextMentionTermination", - "StopMessageTermination", - "TokenUsageTermination", - "HandoffTermination", - "TimeoutTermination", - "ExternalTermination", - "SourceMatchTermination", - "Console", -] diff --git a/python/packages/autogen-core/src/autogen_core/base/__init__.py b/python/packages/autogen-core/src/autogen_core/base/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/python/packages/autogen-core/src/autogen_core/base/intervention.py b/python/packages/autogen-core/src/autogen_core/base/intervention.py deleted file mode 100644 index aa4a388a3ded..000000000000 --- a/python/packages/autogen-core/src/autogen_core/base/intervention.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing_extensions import deprecated - -from .._intervention import DefaultInterventionHandler as DefaultInterventionHandlerAlias -from .._intervention import DropMessage as DropMessageAlias -from .._intervention import InterventionHandler as InterventionHandlerAliass - -__all__ = [ - "DropMessage", - "InterventionHandler", - "DefaultInterventionHandler", -] - -# Final so can't inherit and deprecate -DropMessage = DropMessageAlias - - -@deprecated("Moved to autogen_core.InterventionHandler. Will remove this in 0.4.0.", stacklevel=2) -class InterventionHandler(InterventionHandlerAliass): ... - - -@deprecated("Moved to autogen_core.DefaultInterventionHandler. Will remove this in 0.4.0.", stacklevel=2) -class DefaultInterventionHandler(DefaultInterventionHandlerAlias): ... 
From 7b10f0ad1bb8609e89346cf8531e54dd83093fb2 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Wed, 8 Jan 2025 09:12:48 -0500 Subject: [PATCH 19/61] Improve docs for model clients, add missing docs (#4930) * Improve docs for model clients * formatting * Fix usage --------- Co-authored-by: peterychang <49209570+peterychang@users.noreply.github.com> --- .../autogen-core/docs/src/reference/index.md | 1 + .../python/autogen_ext.auth.azure.rst | 8 + .../python/autogen_ext.models.openai.rst | 1 + .../src/autogen_ext/models/openai/__init__.py | 19 ++- .../models/openai/_openai_client.py | 140 +++++++++++++----- .../models/openai/config/__init__.py | 10 -- .../core_distributed-group-chat/_types.py | 2 +- .../core_distributed-group-chat/_utils.py | 2 +- 8 files changed, 131 insertions(+), 52 deletions(-) create mode 100644 python/packages/autogen-core/docs/src/reference/python/autogen_ext.auth.azure.rst diff --git a/python/packages/autogen-core/docs/src/reference/index.md b/python/packages/autogen-core/docs/src/reference/index.md index 2742ddbc383e..3f1374643931 100644 --- a/python/packages/autogen-core/docs/src/reference/index.md +++ b/python/packages/autogen-core/docs/src/reference/index.md @@ -45,6 +45,7 @@ python/autogen_ext.agents.web_surfer python/autogen_ext.agents.file_surfer python/autogen_ext.agents.video_surfer python/autogen_ext.agents.video_surfer.tools +python/autogen_ext.auth.azure python/autogen_ext.teams.magentic_one python/autogen_ext.models.openai python/autogen_ext.models.replay diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.auth.azure.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.auth.azure.rst new file mode 100644 index 000000000000..7d27398260df --- /dev/null +++ b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.auth.azure.rst @@ -0,0 +1,8 @@ +autogen\_ext.auth.azure +======================= + + +.. 
automodule:: autogen_ext.auth.azure + :members: + :undoc-members: + :show-inheritance: diff --git a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.openai.rst b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.openai.rst index 44703cb70ee9..b74e4903f52b 100644 --- a/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.openai.rst +++ b/python/packages/autogen-core/docs/src/reference/python/autogen_ext.models.openai.rst @@ -6,3 +6,4 @@ autogen\_ext.models.openai :members: :undoc-members: :show-inheritance: + :member-order: bysource diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py index bad5690e3cd9..dbe2eb65e045 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py @@ -1,12 +1,17 @@ -from ._openai_client import ( - AzureOpenAIChatCompletionClient, - OpenAIChatCompletionClient, +from ._openai_client import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient, BaseOpenAIChatCompletionClient +from .config import ( + AzureOpenAIClientConfigurationConfigModel, + OpenAIClientConfigurationConfigModel, + BaseOpenAIClientConfigurationConfigModel, + CreateArgumentsConfigModel, ) -from .config import AzureOpenAIClientConfiguration, OpenAIClientConfiguration __all__ = [ - "AzureOpenAIClientConfiguration", - "AzureOpenAIChatCompletionClient", - "OpenAIClientConfiguration", "OpenAIChatCompletionClient", + "AzureOpenAIChatCompletionClient", + "BaseOpenAIChatCompletionClient", + "AzureOpenAIClientConfigurationConfigModel", + "OpenAIClientConfigurationConfigModel", + "BaseOpenAIClientConfigurationConfigModel", + "CreateArgumentsConfigModel", ] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py 
b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index 2975d19b64f6..31db4974bbb0 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -907,52 +907,79 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient, Component[OpenA for additional model clients. Args: - model (str): The model to use. **Required.** - api_key (str): The API key to use. **Required if 'OPENAI_API_KEY' is not found in the environment variables.** - timeout (optional, int): The timeout for the request in seconds. - max_retries (optional, int): The maximum number of retries to attempt. - organization_id (optional, str): The organization ID to use. + model (str): Which OpenAI model to use. + api_key (optional, str): The API key to use. **Required if 'OPENAI_API_KEY' is not found in the environment variables.** + organization (optional, str): The organization ID to use. base_url (optional, str): The base URL to use. **Required if the model is not hosted on OpenAI.** - model_capabilities (optional, ModelCapabilities): The capabilities of the model. **Required if the model name is not a valid OpenAI model.** + timeout: (optional, float): The timeout for the request in seconds. + max_retries (optional, int): The maximum number of retries to attempt. + model_info (optional, ModelInfo): The capabilities of the model. **Required if the model name is not a valid OpenAI model.** + frequency_penalty (optional, float): + logit_bias: (optional, dict[str, int]): + max_tokens (optional, int): + n (optional, int): + presence_penalty (optional, float): + response_format (optional, literal["json_object", "text"]): + seed (optional, int): + stop (optional, str | List[str]): + temperature (optional, float): + top_p (optional, float): + user (optional, str): + To use this client, you must install the `openai` extension: - .. code-block:: bash + .. 
code-block:: bash - pip install "autogen-ext[openai]==0.4.0.dev13" + pip install "autogen-ext[openai]==0.4.0.dev13" The following code snippet shows how to use the client with an OpenAI model: - .. code-block:: python + .. code-block:: python - from autogen_ext.models.openai import OpenAIChatCompletionClient - from autogen_core.models import UserMessage + from autogen_ext.models.openai import OpenAIChatCompletionClient + from autogen_core.models import UserMessage - openai_client = OpenAIChatCompletionClient( - model="gpt-4o-2024-08-06", - # api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set. - ) + openai_client = OpenAIChatCompletionClient( + model="gpt-4o-2024-08-06", + # api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set. + ) - result = await openai_client.create([UserMessage(content="What is the capital of France?", source="user")]) # type: ignore - print(result) + result = await openai_client.create([UserMessage(content="What is the capital of France?", source="user")]) # type: ignore + print(result) To use the client with a non-OpenAI model, you need to provide the base URL of the model and the model capabilities: - .. code-block:: python + .. 
code-block:: python - from autogen_ext.models.openai import OpenAIChatCompletionClient + from autogen_ext.models.openai import OpenAIChatCompletionClient - custom_model_client = OpenAIChatCompletionClient( - model="custom-model-name", - base_url="https://custom-model.com/reset/of/the/path", - api_key="placeholder", - model_capabilities={ - "vision": True, - "function_calling": True, - "json_output": True, - }, - ) + custom_model_client = OpenAIChatCompletionClient( + model="custom-model-name", + base_url="https://custom-model.com/reset/of/the/path", + api_key="placeholder", + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, + ) + + To load the client from a configuration, you can use the `load_component` method: + + .. code-block:: python + + from autogen_core.models import ChatCompletionClient + + config = { + "provider": "OpenAIChatCompletionClient", + "config": {"model": "gpt-4o", "api_key": "REPLACE_WITH_YOUR_API_KEY"}, + } + + client = ChatCompletionClient.load_component(config) + + To view the full list of available configuration options, see the :py:class:`OpenAIClientConfigurationConfigModel` class. """ @@ -1007,15 +1034,29 @@ class AzureOpenAIChatCompletionClient( """Chat completion client for Azure OpenAI hosted models. Args: + + model (str): Which OpenAI model to use. azure_endpoint (str): The endpoint for the Azure model. **Required for Azure models.** - model (str): The deployment ID for the Azure model. **Required for Azure models.** + azure_deployment (str): Deployment name for the Azure model. **Required for Azure models.** api_version (str): The API version to use. **Required for Azure models.** azure_ad_token (str): The Azure AD token to use. Provide this or `azure_ad_token_provider` for token-based authentication. - azure_ad_token_provider (Callable[[], Awaitable[str]]): The Azure AD token provider to use. Provide this or `azure_ad_token` for token-based authentication. 
- model_capabilities (ModelCapabilities): The capabilities of the model if default resolved values are not correct. + azure_ad_token_provider (optional, Callable[[], Awaitable[str]] | AzureTokenProvider): The Azure AD token provider to use. Provide this or `azure_ad_token` for token-based authentication. api_key (optional, str): The API key to use, use this if you are using key based authentication. It is optional if you are using Azure AD token based authentication or `AZURE_OPENAI_API_KEY` environment variable. - timeout (optional, int): The timeout for the request in seconds. + timeout: (optional, float): The timeout for the request in seconds. max_retries (optional, int): The maximum number of retries to attempt. + model_info (optional, ModelInfo): The capabilities of the model. **Required if the model name is not a valid OpenAI model.** + frequency_penalty (optional, float): + logit_bias: (optional, dict[str, int]): + max_tokens (optional, int): + n (optional, int): + presence_penalty (optional, float): + response_format (optional, literal["json_object", "text"]): + seed (optional, int): + stop (optional, str | List[str]): + temperature (optional, float): + top_p (optional, float): + user (optional, str): + To use this client, you must install the `azure` and `openai` extensions: @@ -1047,6 +1088,39 @@ class AzureOpenAIChatCompletionClient( # api_key="sk-...", # For key-based authentication. `AZURE_OPENAI_API_KEY` environment variable can also be used instead. ) + To load the client that uses identity-based auth from a configuration, you can use the `load_component` method: + + .. 
code-block:: python + + from autogen_core.models import ChatCompletionClient + + config = { + "provider": "AzureOpenAIChatCompletionClient", + "config": { + "model": "gpt-4o-2024-05-13", + "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/", + "azure_deployment": "{your-azure-deployment}", + "api_version": "2024-06-01", + "azure_ad_token_provider": { + "provider": "autogen_ext.models.openai.AzureTokenProvider", + "config": { + "provider_kind": "DefaultAzureCredential", + "scopes": ["https://cognitiveservices.azure.com/.default"], + }, + }, + }, + } + + client = ChatCompletionClient.load_component(config) + + + To view the full list of available configuration options, see the :py:class:`AzureOpenAIClientConfigurationConfigModel` class. + + + .. note:: + + Right now only `DefaultAzureCredential` is supported with no additional args passed to it. + See `here `_ for how to use the Azure client directly or for more info. """ diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py index 482af3a6fa69..a6b484780212 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py @@ -52,17 +52,7 @@ class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False) azure_ad_token_provider: AsyncAzureADTokenProvider # Or AzureTokenProvider -__all__ = [ - "AzureOpenAIClientConfiguration", - "OpenAIClientConfiguration", - "AzureOpenAIClientConfigurationConfigModel", - "OpenAIClientConfigurationConfigModel", -] - - # Pydantic equivalents of the above TypedDicts - - class CreateArgumentsConfigModel(BaseModel): frequency_penalty: float | None = None logit_bias: Dict[str, int] | None = None diff --git a/python/samples/core_distributed-group-chat/_types.py b/python/samples/core_distributed-group-chat/_types.py index 
cf5d8e75263d..033aa835bb0f 100644 --- a/python/samples/core_distributed-group-chat/_types.py +++ b/python/samples/core_distributed-group-chat/_types.py @@ -4,7 +4,7 @@ from autogen_core.models import ( LLMMessage, ) -from autogen_ext.models.openai import AzureOpenAIClientConfiguration +from autogen_ext.models.openai.config import AzureOpenAIClientConfiguration from pydantic import BaseModel diff --git a/python/samples/core_distributed-group-chat/_utils.py b/python/samples/core_distributed-group-chat/_utils.py index 2e329e745b73..9a84f30d0395 100644 --- a/python/samples/core_distributed-group-chat/_utils.py +++ b/python/samples/core_distributed-group-chat/_utils.py @@ -5,7 +5,7 @@ import yaml from _types import AppConfig from autogen_core import MessageSerializer, try_get_known_serializers_for_type -from autogen_ext.models.openai import AzureOpenAIClientConfiguration +from autogen_ext.models.openai.config import AzureOpenAIClientConfiguration from azure.identity import DefaultAzureCredential, get_bearer_token_provider From d610d481cdcda2b3be61b0ba42cecb0622ab410c Mon Sep 17 00:00:00 2001 From: peterychang <49209570+peterychang@users.noreply.github.com> Date: Wed, 8 Jan 2025 09:16:43 -0500 Subject: [PATCH 20/61] Add documentation and TODOs for xlang (#4926) Co-authored-by: Jack Gerrits --- .../framework/distributed-agent-runtime.ipynb | 3 +++ .../runtimes/grpc/_worker_runtime.py | 25 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index 7ba80bb98a18..bdf53792d57e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -189,6 +189,9 @@ "cell_type": "markdown", 
"metadata": {}, "source": [ + "# Cross-Language Runtimes\n", + "The process described above is largely the same, however all message types MUST use shared protobuf schemas for all cross-agent message types.\n", + "\n", "# Next Steps\n", "To see complete examples of using distributed runtime, please take a look at the following samples:\n", "\n", diff --git a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py index 4ae66e44ccf6..e764b0644191 100644 --- a/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py +++ b/python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/_worker_runtime.py @@ -178,7 +178,32 @@ async def recv(self) -> agent_worker_pb2.Message: return await self._recv_queue.get() +# TODO: Lots of types need to have protobuf equivalents: +# Core: +# - FunctionCall, CodeResult, possibly CodeBlock +# - All the types in https://github.com/microsoft/autogen/blob/main/python/packages/autogen-core/src/autogen_core/models/_types.py +# +# Agentchat: +# - All the types in https://github.com/microsoft/autogen/blob/main/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py to protobufs. +# +# Ext -- +# CodeExecutor: +# - CommandLineCodeResult + + class GrpcWorkerAgentRuntime(AgentRuntime): + """An agent runtime for running remote or cross-language agents. + + Agent messaging uses protobufs from `agent_worker.proto`_ and ``CloudEvent`` from `cloudevent.proto`_. + + Cross-language agents will additionally require all agents use shared protobuf schemas for any message types that are sent between agents. + + .. _agent_worker.proto: https://github.com/microsoft/autogen/blob/main/protos/agent_worker.proto + + .. 
_cloudevent.proto: https://github.com/microsoft/autogen/blob/main/protos/cloudevent.proto + + """ + # TODO: Needs to handle agent close() call def __init__( self, From ad123641daf956a4700cd0a67c05296ff1146d7c Mon Sep 17 00:00:00 2001 From: Mohammad Mazraeh Date: Wed, 8 Jan 2025 06:23:31 -0800 Subject: [PATCH 21/61] Update agent and agent runtime doc with routed agent (#4935) * add back removed note Signed-off-by: Mohammad Mazraeh * fix formatting issues Signed-off-by: Mohammad Mazraeh --------- Signed-off-by: Mohammad Mazraeh --- .../framework/agent-and-agent-runtime.ipynb | 565 +++++++++--------- 1 file changed, 282 insertions(+), 283 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb index bfff326b56d0..f1de10bf494d 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb @@ -1,285 +1,284 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Agent and Agent Runtime\n", - "\n", - "In this and the following section, we focus on the core concepts of AutoGen:\n", - "agents, agent runtime, messages, and communication.\n", - "You will not find any AI models or tools here, just the foundational\n", - "building blocks for building multi-agent applications.\n", - "\n", - "```{note}\n", - "The Core API is designed to be unopinionated and flexible. So at times, you\n", - "may find it challenging. 
Continue if you are building\n", - "an interactive, scalable and distributed multi-agent system and want full control\n", - "of all workflows.\n", - "If you just want to get something running\n", - "quickly, you may take a look at the [AgentChat API](../../agentchat-user-guide/index.md).\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "An agent in AutoGen is an entity defined by the base class {py:class}`autogen_core.Agent`.\n", - "It has a unique identifier of the type {py:class}`autogen_core.AgentId`,\n", - "a metadata dictionary of the type {py:class}`autogen_core.AgentMetadata`,\n", - "and method for handling messages {py:meth}`autogen_core.BaseAgent.on_message_impl`.\n", - "\n", - "An agent runtime is the execution environment for agents in AutoGen.\n", - "Similar to the runtime environment of a programming language,\n", - "an agent runtime provides the necessary infrastructure to facilitate communication\n", - "between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n", - "debugging.\n", - "For local development, developers can use {py:class}`~autogen_core.SingleThreadedAgentRuntime`,\n", - "which can be embedded in a Python application.\n", - "\n", - "```{note}\n", - "Agents are not directly instantiated and managed by application code.\n", - "Instead, they are created by the runtime when needed and managed by the runtime.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Implementing an Agent\n", - "\n", - "To implement an agent, the developer must subclass the {py:class}`~autogen_core.BaseAgent` class\n", - "and implement the {py:meth}`~autogen_core.BaseAgent.on_message_impl` method.\n", - "This method is invoked when the agent receives a message. 
For example,\n", - "the following agent handles a simple message type and prints the message it receives:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "\n", - "from autogen_core import AgentId, BaseAgent, MessageContext\n", - "\n", - "\n", - "@dataclass\n", - "class MyMessageType:\n", - " content: str\n", - "\n", - "\n", - "class MyAgent(BaseAgent):\n", - " def __init__(self) -> None:\n", - " super().__init__(\"MyAgent\")\n", - "\n", - " async def on_message_impl(self, message: MyMessageType, ctx: MessageContext) -> None:\n", - " print(f\"Received message: {message.content}\") # type: ignore" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This agent only handles `MyMessageType` messages. \n", - "To handle multiple message types, developers can subclass the {py:class}`~autogen_core.RoutedAgent` class\n", - "which provides an easy-to use API to implement different message handlers for different message types.\n", - "See the next section on [message and communication](./message-and-communication.ipynb)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Registering Agent Type\n", - "\n", - "To make agents available to the runtime, developers can use the\n", - "{py:meth}`~autogen_core.BaseAgent.register` class method of the\n", - "{py:class}`~autogen_core.BaseAgent` class.\n", - "The process of registration associates an agent type, which is uniquely identified by a string, \n", - "and a factory function\n", - "that creates an instance of the agent type of the given class.\n", - "The factory function is used to allow automatic creation of agent instances \n", - "when they are needed.\n", - "\n", - "Agent type ({py:class}`~autogen_core.AgentType`) is not the same as the agent class. 
In this example,\n", - "the agent type is `AgentType(\"my_agent\")` and the agent class is the Python class `MyAgent`.\n", - "The factory function is expected to return an instance of the agent class \n", - "on which the {py:meth}`~autogen_core.BaseAgent.register` class method is invoked.\n", - "Read [Agent Identity and Lifecycles](../core-concepts/agent-identity-and-lifecycle.md)\n", - "to learn more about agent type and identity.\n", - "\n", - "```{note}\n", - "Different agent types can be registered with factory functions that return \n", - "the same agent class. For example, in the factory functions, \n", - "variations of the constructor parameters\n", - "can be used to create different instances of the same agent class.\n", - "```\n", - "\n", - "To register an agent type with the \n", - "{py:class}`~autogen_core.SingleThreadedAgentRuntime`,\n", - "the following code can be used:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AgentType(type='my_agent')" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_core import SingleThreadedAgentRuntime\n", - "\n", - "runtime = SingleThreadedAgentRuntime()\n", - "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once an agent type is registered, we can send a direct message to an agent instance\n", - "using an {py:class}`~autogen_core.AgentId`.\n", - "The runtime will create the instance the first time it delivers a\n", - "message to this instance." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Received message: Hello, World!\n" - ] - } - ], - "source": [ - "agent_id = AgentId(\"my_agent\", \"default\")\n", - "runtime.start() # Start processing messages in the background.\n", - "await runtime.send_message(MyMessageType(\"Hello, World!\"), agent_id)\n", - "await runtime.stop() # Stop processing messages in the background." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "Because the runtime manages the lifecycle of agents, an {py:class}`~autogen_core.AgentId`\n", - "is only used to communicate with the agent or retrieve its metadata (e.g., description).\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running the Single-Threaded Agent Runtime\n", - "\n", - "The above code snippet uses `runtime.start()` to start a background task\n", - "to process and deliver messages to recepients' message handlers.\n", - "This is a feature of the\n", - "local embedded runtime {py:class}`~autogen_core.SingleThreadedAgentRuntime`.\n", - "\n", - "To stop the background task immediately, use the `stop()` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "runtime.start()\n", - "# ... Send messages, publish messages, etc.\n", - "await runtime.stop() # This will return immediately but will not cancel\n", - "# any in-progress message handling." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can resume the background task by calling `start()` again.\n", - "\n", - "For batch scenarios such as running benchmarks for evaluating agents,\n", - "you may want to wait for the background task to stop automatically when\n", - "there are no unprocessed messages and no agent is handling messages --\n", - "the batch may considered complete.\n", - "You can achieve this by using the `stop_when_idle()` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "runtime.start()\n", - "# ... Send messages, publish messages, etc.\n", - "await runtime.stop_when_idle() # This will block until the runtime is idle." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also directly process messages one-by-one without a background task using:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "await runtime.process_next()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Other runtime implementations will have their own ways of running the runtime." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.6" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agent and Agent Runtime\n", + "\n", + "In this and the following section, we focus on the core concepts of AutoGen:\n", + "agents, agent runtime, messages, and communication.\n", + "You will not find any AI models or tools here, just the foundational\n", + "building blocks for building multi-agent applications.\n", + "\n", + "```{note}\n", + "The Core API is designed to be unopinionated and flexible. So at times, you\n", + "may find it challenging. Continue if you are building\n", + "an interactive, scalable and distributed multi-agent system and want full control\n", + "of all workflows.\n", + "If you just want to get something running\n", + "quickly, you may take a look at the [AgentChat API](../../agentchat-user-guide/index.md).\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "An agent in AutoGen is an entity defined by the base class {py:class}`autogen_core.Agent`.\n", + "It has a unique identifier of the type {py:class}`autogen_core.AgentId`,\n", + "a metadata dictionary of the type {py:class}`autogen_core.AgentMetadata`,\n", + "\n", + "and method for handling messages {py:meth}`autogen_core.BaseAgent.on_message_impl`. 
In most cases, you can subclass your agents from higher level class {py:class}`autogen_core.RoutedAgent` which enables you to route messages to corresponding message handler specified with {py:meth}`autogen_core.message_handler` decorator and proper type hint for the `message` variable.\n", + "An agent runtime is the execution environment for agents in AutoGen.\n", + "Similar to the runtime environment of a programming language,\n", + "an agent runtime provides the necessary infrastructure to facilitate communication\n", + "between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n", + "debugging.\n", + "For local development, developers can use {py:class}`~autogen_core.SingleThreadedAgentRuntime`,\n", + "which can be embedded in a Python application.\n", + "\n", + "```{note}\n", + "Agents are not directly instantiated and managed by application code.\n", + "Instead, they are created by the runtime when needed and managed by the runtime.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Implementing an Agent\n", + "\n", + "To implement an agent, the developer must subclass the {py:class}`~autogen_core.RoutedAgent` class\n", + "and implement the {py:meth}`~autogen_core.RoutedAgent.on_message_impl` method.\n", + "This method is invoked when the agent receives a message. 
For example,\n", + "the following agent handles a simple message type `MyMessageType` and prints the message it receives:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "\n", + "from autogen_core import AgentId, MessageContext, RoutedAgent, message_handler\n", + "\n", + "\n", + "@dataclass\n", + "class MyMessageType:\n", + " content: str\n", + "\n", + "\n", + "class MyAgent(RoutedAgent):\n", + " def __init__(self) -> None:\n", + " super().__init__(\"MyAgent\")\n", + "\n", + " @message_handler\n", + " async def handle_my_message_type(self, message: MyMessageType, ctx: MessageContext) -> None:\n", + " print(f\"Received message: {message.content}\") # type: ignore" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This agent only handles `MyMessageType` and messages will be delivered to `handle_my_message_type` method. Developers can have multiple message handlers for different message types by using `@message_handler` decorator and setting the type hint for the `message` variable in the handler function. You can also leverage [python typing union](https://docs.python.org/3/library/typing.html#typing.Union) for the `message` variable in one message handler function if it better suits agent's logic.\n", + "See the next section on [message and communication](./message-and-communication.ipynb)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Registering Agent Type\n", + "\n", + "To make agents available to the runtime, developers can use the\n", + "{py:meth}`~autogen_core.BaseAgent.register` class method of the\n", + "{py:class}`~autogen_core.BaseAgent` class.\n", + "The process of registration associates an agent type, which is uniquely identified by a string, \n", + "and a factory function\n", + "that creates an instance of the agent type of the given class.\n", + "The factory function is used to allow automatic creation of agent instances \n", + "when they are needed.\n", + "\n", + "Agent type ({py:class}`~autogen_core.AgentType`) is not the same as the agent class. In this example,\n", + "the agent type is `AgentType(\"my_agent\")` and the agent class is the Python class `MyAgent`.\n", + "The factory function is expected to return an instance of the agent class \n", + "on which the {py:meth}`~autogen_core.BaseAgent.register` class method is invoked.\n", + "Read [Agent Identity and Lifecycles](../core-concepts/agent-identity-and-lifecycle.md)\n", + "to learn more about agent type and identity.\n", + "\n", + "```{note}\n", + "Different agent types can be registered with factory functions that return \n", + "the same agent class. 
For example, in the factory functions, \n", + "variations of the constructor parameters\n", + "can be used to create different instances of the same agent class.\n", + "```\n", + "\n", + "To register an agent type with the \n", + "{py:class}`~autogen_core.SingleThreadedAgentRuntime`,\n", + "the following code can be used:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AgentType(type='my_agent')" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from autogen_core import SingleThreadedAgentRuntime\n", + "\n", + "runtime = SingleThreadedAgentRuntime()\n", + "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once an agent type is registered, we can send a direct message to an agent instance\n", + "using an {py:class}`~autogen_core.AgentId`.\n", + "The runtime will create the instance the first time it delivers a\n", + "message to this instance." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Received message: Hello, World!\n" + ] + } + ], + "source": [ + "agent_id = AgentId(\"my_agent\", \"default\")\n", + "runtime.start() # Start processing messages in the background.\n", + "await runtime.send_message(MyMessageType(\"Hello, World!\"), agent_id)\n", + "await runtime.stop() # Stop processing messages in the background." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "Because the runtime manages the lifecycle of agents, an {py:class}`~autogen_core.AgentId`\n", + "is only used to communicate with the agent or retrieve its metadata (e.g., description).\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running the Single-Threaded Agent Runtime\n", + "\n", + "The above code snippet uses `runtime.start()` to start a background task\n", + "to process and deliver messages to recipients' message handlers.\n", + "This is a feature of the\n", + "local embedded runtime {py:class}`~autogen_core.SingleThreadedAgentRuntime`.\n", + "\n", + "To stop the background task immediately, use the `stop()` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "runtime.start()\n", + "# ... Send messages, publish messages, etc.\n", + "await runtime.stop() # This will return immediately but will not cancel\n", + "# any in-progress message handling." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can resume the background task by calling `start()` again.\n", + "\n", + "For batch scenarios such as running benchmarks for evaluating agents,\n", + "you may want to wait for the background task to stop automatically when\n", + "there are no unprocessed messages and no agent is handling messages --\n", + "the batch may be considered complete.\n", + "You can achieve this by using the `stop_when_idle()` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "runtime.start()\n", + "# ... Send messages, publish messages, etc.\n", + "await runtime.stop_when_idle() # This will block until the runtime is idle." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also directly process messages one-by-one without a background task using:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "await runtime.process_next()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Other runtime implementations will have their own ways of running the runtime." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } From b850dcd39988db662b513de58fb5d2e0d49b6188 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 8 Jan 2025 06:28:25 -0800 Subject: [PATCH 22/61] Fix link to samples (#4933) * Fix link to samples * fix readme --------- Co-authored-by: Jack Gerrits --- .../framework/distributed-agent-runtime.ipynb | 8 ++--- .../core_async_human_in_the_loop/README.md | 35 ++++--------------- 2 files changed, 10 insertions(+), 33 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index bdf53792d57e..96a80a2f08cb 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -195,9 +195,9 @@ "# Next Steps\n", "To see complete examples of using distributed runtime, please take a look at the following samples:\n", "\n", - "- [Distributed 
Workers](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/worker) \n", - "- [Distributed Semantic Router](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/semantic_router) \n", - "- [Distributed Group Chat](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/distributed-group-chat) \n" + "- [Distributed Workers](https://github.com/microsoft/autogen/tree/main/python/samples/core_grpc_worker_runtime) \n", + "- [Distributed Semantic Router](https://github.com/microsoft/autogen/tree/main/python/samples/core_semantic_router) \n", + "- [Distributed Group Chat](https://github.com/microsoft/autogen/tree/main/python/samples/core_distributed-group-chat) \n" ] } ], @@ -222,4 +222,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/python/samples/core_async_human_in_the_loop/README.md b/python/samples/core_async_human_in_the_loop/README.md index f1fca50c9cf0..af8d9e4a1d79 100644 --- a/python/samples/core_async_human_in_the_loop/README.md +++ b/python/samples/core_async_human_in_the_loop/README.md @@ -12,36 +12,13 @@ First, you need a shell with AutoGen core and required dependencies installed. pip install "autogen-core==0.4.0.dev13" "autogen-ext[openai,azure]==0.4.0.dev13" ``` -### Using Azure OpenAI API +### Model Configuration -For Azure OpenAI API, you need to set the following environment variables: +The model configuration should defined in a `model_config.json` file. +Use `model_config_template.json` as a template. -```bash -export OPENAI_API_TYPE=azure -export AZURE_OPENAI_API_ENDPOINT=your_azure_openai_endpoint -export AZURE_OPENAI_API_VERSION=your_azure_openai_api_version -``` - -By default, we use Azure Active Directory (AAD) for authentication. -You need to run `az login` first to authenticate with Azure. 
-You can also -use API key authentication by setting the following environment variables: - -```bash -export AZURE_OPENAI_API_KEY=your_azure_openai_api_key -``` - -This requires azure-identity installation: +### Running the example ```bash -pip install azure-identity -``` - -### Using OpenAI API - -For OpenAI API, you need to set the following environment variables. - -```bash -export OPENAI_API_TYPE=openai -export OPENAI_API_KEY=your_openai_api_key -``` +python main.py +``` \ No newline at end of file From 538f39497b5e5a10ced116987450ba16f6b51205 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Wed, 8 Jan 2025 09:33:28 -0500 Subject: [PATCH 23/61] Replace create_completion_client_from_env with component config (#4928) * Replace create_completion_client_from_env with component config * json load --- .../Templates/MagenticOne/scenario.py | 11 ++-- .../GAIA/Templates/MagenticOne/scenario.py | 8 +-- .../Templates/MagenticOne/scenario.py | 4 +- .../Templates/MagenticOne/scenario.py | 5 +- .../packages/autogen-magentic-one/README.md | 66 +++++++++++++------ .../autogen-magentic-one/examples/example.py | 7 +- .../examples/example_coder.py | 8 ++- .../examples/example_file_surfer.py | 7 +- .../examples/example_userproxy.py | 7 +- .../examples/example_websurfer.py | 7 +- .../interface/magentic_one_helper.py | 7 +- .../src/autogen_magentic_one/utils.py | 62 ----------------- .../headless_web_surfer/test_web_surfer.py | 27 +++----- 13 files changed, 99 insertions(+), 127 deletions(-) diff --git a/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py index 9a6ecc9b6a3e..dda79bd8d9ab 100644 --- a/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py @@ -1,4 +1,5 @@ import asyncio +import json import logging import os import re @@ -10,7 +11,7 @@ 
from autogen_core import AgentId, AgentProxy, TopicId from autogen_core import SingleThreadedAgentRuntime -from autogen_core.logging import EVENT_LOGGER_NAME +from autogen_core import EVENT_LOGGER_NAME from autogen_core.models import ( ChatCompletionClient, UserMessage, @@ -26,7 +27,7 @@ from autogen_magentic_one.messages import BroadcastMessage from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer from autogen_magentic_one.agents.file_surfer import FileSurfer -from autogen_magentic_one.utils import LogHandler, message_content_to_str, create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler, message_content_to_str encoding = None def count_token(value: str) -> int: @@ -123,10 +124,8 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() # Create the AzureOpenAI client from the environment file - client = create_completion_client_from_env() - - - mlm_client = create_completion_client_from_env() + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) + mlm_client = ChatCompletionClient.load_component(json.loads(os.environ["MLM_CHAT_COMPLETION_CLIENT_CONFIG"])) # Register agents. 
diff --git a/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py index 5e5b677e4b54..32e0a11d36d1 100644 --- a/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py @@ -1,4 +1,5 @@ import asyncio +import json import logging import os import re @@ -27,7 +28,7 @@ from autogen_magentic_one.messages import BroadcastMessage from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer from autogen_magentic_one.agents.file_surfer import FileSurfer -from autogen_magentic_one.utils import LogHandler, message_content_to_str, create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler, message_content_to_str encoding = None def count_token(value: str) -> int: @@ -124,11 +125,10 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() # Create the AzureOpenAI client, with AAD auth, from environment - client = create_completion_client_from_env() + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) + mlm_client = ChatCompletionClient.load_component(json.loads(os.environ["MLM_CHAT_COMPLETION_CLIENT_CONFIG"])) - mlm_client = create_completion_client_from_env() - # Register agents. 
await runtime.register( "Assistant", diff --git a/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py index c57971ea69e1..7059ac82c8fc 100644 --- a/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py @@ -13,7 +13,6 @@ from autogen_magentic_one.agents.coder import Coder, Executor from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator from autogen_magentic_one.messages import BroadcastMessage, OrchestrationEvent -from autogen_magentic_one.utils import create_completion_client_from_env async def main() -> None: @@ -21,7 +20,8 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() # Create the AzureOpenAI client - client = create_completion_client_from_env() + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) + # Register agents. 
await runtime.register( diff --git a/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py index ab387ce598b6..08f158ac9659 100644 --- a/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py @@ -24,7 +24,7 @@ from autogen_magentic_one.messages import BroadcastMessage, OrchestrationEvent, RequestReplyMessage, ResetMessage, DeactivateMessage from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer from autogen_magentic_one.agents.file_surfer import FileSurfer -from autogen_magentic_one.utils import LogHandler, message_content_to_str, create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler, message_content_to_str import evaluation_harness @@ -120,7 +120,8 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() # Create the AzureOpenAI client, with AAD auth - client = create_completion_client_from_env() + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) + # Login assistant await runtime.register( "LoginAssistant", diff --git a/python/packages/autogen-magentic-one/README.md b/python/packages/autogen-magentic-one/README.md index 8d81ad7ceb15..a0573df8c52a 100644 --- a/python/packages/autogen-magentic-one/README.md +++ b/python/packages/autogen-magentic-one/README.md @@ -119,28 +119,39 @@ playwright install --with-deps chromium ## Environment Configuration for Chat Completion Client -This guide outlines how to configure your environment to use the `create_completion_client_from_env` function, which reads environment variables to return an appropriate `ChatCompletionClient`. +This guide outlines how to structure the config to load a ChatCompletionClient for Magentic-One. 
+ +```python +from autogen_core.models import ChatCompletionClient + +config = {} +client = ChatCompletionClient.load_component(config) +``` Currently, Magentic-One only supports OpenAI's GPT-4o as the underlying LLM. ### Azure OpenAI service -To configure for Azure OpenAI service, set the following environment variables: - -- `CHAT_COMPLETION_PROVIDER='azure'` -- `CHAT_COMPLETION_KWARGS_JSON` with the following JSON structure: +To configure for Azure OpenAI service, use the following config: ```json { - "api_version": "2024-02-15-preview", - "azure_endpoint": "REPLACE_WITH_YOUR_ENDPOINT", - "model_capabilities": { - "function_calling": true, - "json_output": true, - "vision": true - }, - "azure_ad_token_provider": "DEFAULT", - "model": "gpt-4o-2024-05-13" + "provider": "AzureOpenAIChatCompletionClient", + "config": { + "model": "gpt-4o-2024-05-13", + "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/", + "azure_deployment": "{your-azure-deployment}", + "api_version": "2024-06-01", + "azure_ad_token_provider": { + "provider": "autogen_ext.models.openai.AzureTokenProvider", + "config": { + "provider_kind": "DefaultAzureCredential", + "scopes": [ + "https://cognitiveservices.azure.com/.default" + ] + } + } + } } ``` @@ -150,19 +161,34 @@ Log in to Azure using `az login`, and then run the examples. The account used mu Note that even if you are the owner of the subscription, you still need to grant the necessary Azure Cognitive Services OpenAI permissions to call the API. 
-### With OpenAI +Or, to use an API key: +```json +{ + "provider": "AzureOpenAIChatCompletionClient", + "config": { + "model": "gpt-4o-2024-05-13", + "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/", + "azure_deployment": "{your-azure-deployment}", + "api_version": "2024-06-01", + "api_key": "REPLACE_WITH_YOUR_API_KEY" + } +} +``` -To configure for OpenAI, set the following environment variables: +### With OpenAI -- `CHAT_COMPLETION_PROVIDER='openai'` -- `CHAT_COMPLETION_KWARGS_JSON` with the following JSON structure: +To configure for OpenAI, use the following config: ```json { - "api_key": "REPLACE_WITH_YOUR_API", - "model": "gpt-4o-2024-05-13" + "provider": "OpenAIChatCompletionClient", + "config": { + "model": "gpt-4o-2024-05-13", + "api_key": "REPLACE_WITH_YOUR_API_KEY" + } } ``` + Feel free to replace the model with newer versions of gpt-4o if needed. ### Other Keys (Optional) diff --git a/python/packages/autogen-magentic-one/examples/example.py b/python/packages/autogen-magentic-one/examples/example.py index 5e975d17526e..048e6c0602ea 100644 --- a/python/packages/autogen-magentic-one/examples/example.py +++ b/python/packages/autogen-magentic-one/examples/example.py @@ -2,11 +2,13 @@ import argparse import asyncio +import json import logging import os from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime from autogen_core.code_executor import CodeBlock +from autogen_core.models._model_client import ChatCompletionClient from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor from autogen_magentic_one.agents.coder import Coder, Executor from autogen_magentic_one.agents.file_surfer import FileSurfer @@ -14,7 +16,7 @@ from autogen_magentic_one.agents.orchestrator import LedgerOrchestrator from autogen_magentic_one.agents.user_proxy import UserProxy from autogen_magentic_one.messages import RequestReplyMessage -from autogen_magentic_one.utils import LogHandler, 
create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler # NOTE: Don't forget to 'playwright install --with-deps chromium' @@ -32,7 +34,8 @@ async def main(logs_dir: str, hil_mode: bool, save_screenshots: bool) -> None: runtime = SingleThreadedAgentRuntime() # Create an appropriate client - client = create_completion_client_from_env(model="gpt-4o") + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) + assert client.model_info["family"] == "gpt-4o", "This example requires the gpt-4o model" async with DockerCommandLineCodeExecutor(work_dir=logs_dir) as code_executor: # Register agents. diff --git a/python/packages/autogen-magentic-one/examples/example_coder.py b/python/packages/autogen-magentic-one/examples/example_coder.py index 3815c1806fcc..2071c78cef05 100644 --- a/python/packages/autogen-magentic-one/examples/example_coder.py +++ b/python/packages/autogen-magentic-one/examples/example_coder.py @@ -5,16 +5,19 @@ """ import asyncio +import json import logging +import os from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime from autogen_core.code_executor import CodeBlock +from autogen_core.models._model_client import ChatCompletionClient from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor from autogen_magentic_one.agents.coder import Coder, Executor from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator from autogen_magentic_one.agents.user_proxy import UserProxy from autogen_magentic_one.messages import RequestReplyMessage -from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler async def confirm_code(code: CodeBlock) -> bool: @@ -29,9 +32,10 @@ async def main() -> None: # Create the runtime. 
runtime = SingleThreadedAgentRuntime() + model_client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) async with DockerCommandLineCodeExecutor() as code_executor: # Register agents. - await Coder.register(runtime, "Coder", lambda: Coder(model_client=create_completion_client_from_env())) + await Coder.register(runtime, "Coder", lambda: Coder(model_client=model_client)) coder = AgentProxy(AgentId("Coder", "default"), runtime) await Executor.register( diff --git a/python/packages/autogen-magentic-one/examples/example_file_surfer.py b/python/packages/autogen-magentic-one/examples/example_file_surfer.py index 4303d039f5df..e718eb115a34 100644 --- a/python/packages/autogen-magentic-one/examples/example_file_surfer.py +++ b/python/packages/autogen-magentic-one/examples/example_file_surfer.py @@ -3,14 +3,17 @@ to write input or perform actions, orchestrated by an round-robin orchestrator agent.""" import asyncio +import json import logging +import os from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime +from autogen_core.models._model_client import ChatCompletionClient from autogen_magentic_one.agents.file_surfer import FileSurfer from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator from autogen_magentic_one.agents.user_proxy import UserProxy from autogen_magentic_one.messages import RequestReplyMessage -from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler async def main() -> None: @@ -18,7 +21,7 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() # Get an appropriate client - client = create_completion_client_from_env() + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) # Register agents. 
await FileSurfer.register(runtime, "file_surfer", lambda: FileSurfer(model_client=client)) diff --git a/python/packages/autogen-magentic-one/examples/example_userproxy.py b/python/packages/autogen-magentic-one/examples/example_userproxy.py index 7096630f6a1e..32561a27c69f 100644 --- a/python/packages/autogen-magentic-one/examples/example_userproxy.py +++ b/python/packages/autogen-magentic-one/examples/example_userproxy.py @@ -5,16 +5,19 @@ The code snippets are not executed in this example.""" import asyncio +import json import logging +import os from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime # from typing import Any, Dict, List, Tuple, Union +from autogen_core.models._model_client import ChatCompletionClient from autogen_magentic_one.agents.coder import Coder from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator from autogen_magentic_one.agents.user_proxy import UserProxy from autogen_magentic_one.messages import RequestReplyMessage -from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler async def main() -> None: @@ -22,7 +25,7 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() # Get an appropriate client - client = create_completion_client_from_env() + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) # Register agents. 
await Coder.register(runtime, "Coder", lambda: Coder(model_client=client)) diff --git a/python/packages/autogen-magentic-one/examples/example_websurfer.py b/python/packages/autogen-magentic-one/examples/example_websurfer.py index 16b806f336fb..825492a626f4 100644 --- a/python/packages/autogen-magentic-one/examples/example_websurfer.py +++ b/python/packages/autogen-magentic-one/examples/example_websurfer.py @@ -4,15 +4,17 @@ orchestrated by an round-robin orchestrator agent.""" import asyncio +import json import logging import os from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime +from autogen_core.models import ChatCompletionClient from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator from autogen_magentic_one.agents.user_proxy import UserProxy from autogen_magentic_one.messages import RequestReplyMessage -from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler # NOTE: Don't forget to 'playwright install --with-deps chromium' @@ -22,7 +24,8 @@ async def main() -> None: runtime = SingleThreadedAgentRuntime() # Create an appropriate client - client = create_completion_client_from_env(model="gpt-4o") + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) + assert client.model_info["family"] == "gpt-4o", "This example requires the gpt-4o model" # Register agents. 
await MultimodalWebSurfer.register(runtime, "WebSurfer", MultimodalWebSurfer) diff --git a/python/packages/autogen-magentic-one/interface/magentic_one_helper.py b/python/packages/autogen-magentic-one/interface/magentic_one_helper.py index 94de714067d7..e082853f64d5 100644 --- a/python/packages/autogen-magentic-one/interface/magentic_one_helper.py +++ b/python/packages/autogen-magentic-one/interface/magentic_one_helper.py @@ -10,6 +10,7 @@ from autogen_core import EVENT_LOGGER_NAME from autogen_core import AgentId, AgentProxy from autogen_core import DefaultTopicId +from autogen_core.models._model_client import ChatCompletionClient from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor from autogen_core.code_executor import CodeBlock @@ -19,7 +20,7 @@ from autogen_magentic_one.agents.orchestrator import LedgerOrchestrator from autogen_magentic_one.agents.user_proxy import UserProxy from autogen_magentic_one.messages import BroadcastMessage -from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env +from autogen_magentic_one.utils import LogHandler from autogen_core.models import UserMessage from threading import Lock @@ -60,7 +61,9 @@ async def initialize(self) -> None: logger.handlers = [self.log_handler] # Create client - client = create_completion_client_from_env(model="gpt-4o") + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) + assert client.model_info["family"] == "gpt-4o", "This example requires the gpt-4o model" + # Set up code executor self.code_executor = DockerCommandLineCodeExecutor(work_dir=self.logs_dir) diff --git a/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py b/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py index 0537142f0ffa..1f9c07525027 100644 --- a/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py +++ 
b/python/packages/autogen-magentic-one/src/autogen_magentic_one/utils.py @@ -1,17 +1,11 @@ import json import logging -import os from dataclasses import asdict from datetime import datetime from typing import Any, Dict, List, Literal from autogen_core import Image from autogen_core.logging import LLMCallEvent -from autogen_core.models import ( - ChatCompletionClient, - ModelCapabilities, # type: ignore -) -from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient from .messages import ( AgentEvent, @@ -23,62 +17,6 @@ WebSurferEvent, ) -ENVIRON_KEY_CHAT_COMPLETION_PROVIDER = "CHAT_COMPLETION_PROVIDER" -ENVIRON_KEY_CHAT_COMPLETION_KWARGS_JSON = "CHAT_COMPLETION_KWARGS_JSON" - -# The singleton _default_azure_ad_token_provider, which will be created if needed -_default_azure_ad_token_provider = None - - -# Create a model client based on information provided in environment variables. -def create_completion_client_from_env(env: Dict[str, str] | None = None, **kwargs: Any) -> ChatCompletionClient: - global _default_azure_ad_token_provider - - """ - Create a model client based on information provided in environment variables. 
- env (Optional): When provied, read from this dictionary rather than os.environ - kwargs**: ChatClient arguments to override (e.g., model) - - NOTE: If 'azure_ad_token_provider' is included, and euquals the string 'DEFAULT' then replace it with - azure.identity.get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") - """ - - # If a dictionary was not provided, load it from the environment - if env is None: - env = dict() - env.update(os.environ) - - # Load the kwargs, and override with provided kwargs - _kwargs = json.loads(env.get(ENVIRON_KEY_CHAT_COMPLETION_KWARGS_JSON, "{}")) - _kwargs.update(kwargs) - - # If model capabilities were provided, deserialize them as well - if "model_capabilities" in _kwargs: - _kwargs["model_capabilities"] = ModelCapabilities( # type: ignore - vision=_kwargs["model_capabilities"].get("vision"), - function_calling=_kwargs["model_capabilities"].get("function_calling"), - json_output=_kwargs["model_capabilities"].get("json_output"), - ) - - # Figure out what provider we are using. 
Default to OpenAI - _provider = env.get(ENVIRON_KEY_CHAT_COMPLETION_PROVIDER, "openai").lower().strip() - - # Instantiate the correct client - if _provider == "openai": - return OpenAIChatCompletionClient(**_kwargs) # type: ignore - elif _provider == "azure": - if _kwargs.get("azure_ad_token_provider", "").lower() == "default": - if _default_azure_ad_token_provider is None: - from azure.identity import DefaultAzureCredential, get_bearer_token_provider - - _default_azure_ad_token_provider = get_bearer_token_provider( - DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" - ) - _kwargs["azure_ad_token_provider"] = _default_azure_ad_token_provider - return AzureOpenAIChatCompletionClient(**_kwargs) # type: ignore - else: - raise ValueError(f"Unknown OAI provider '{_provider}'") - # Convert UserContent to a string def message_content_to_str( diff --git a/python/packages/autogen-magentic-one/tests/headless_web_surfer/test_web_surfer.py b/python/packages/autogen-magentic-one/tests/headless_web_surfer/test_web_surfer.py index b9a7d4ceeb41..140bc82f2891 100644 --- a/python/packages/autogen-magentic-one/tests/headless_web_surfer/test_web_surfer.py +++ b/python/packages/autogen-magentic-one/tests/headless_web_surfer/test_web_surfer.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 -m pytest import asyncio +import json import os import re from json import dumps @@ -12,6 +13,7 @@ from autogen_core.models import ( UserMessage, ) +from autogen_core.models._model_client import ChatCompletionClient from autogen_core.tools._base import ToolSchema from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer from autogen_magentic_one.agents.multimodal_web_surfer.tool_definitions import ( @@ -25,11 +27,6 @@ from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator from autogen_magentic_one.agents.user_proxy import UserProxy from autogen_magentic_one.messages import BroadcastMessage -from autogen_magentic_one.utils import ( - 
ENVIRON_KEY_CHAT_COMPLETION_KWARGS_JSON, - ENVIRON_KEY_CHAT_COMPLETION_PROVIDER, - create_completion_client_from_env, -) from conftest import MOCK_CHAT_COMPLETION_KWARGS, reason from openai import AuthenticationError @@ -57,7 +54,7 @@ # Search currently does not require an API key skip_bing = False -if os.getenv(ENVIRON_KEY_CHAT_COMPLETION_KWARGS_JSON): +if os.getenv("CHAT_COMPLETION_CLIENT_CONFIG"): skip_openai = False else: skip_openai = True @@ -99,14 +96,10 @@ async def make_browser_request(browser: MultimodalWebSurfer, tool: ToolSchema, a @pytest.mark.skip(reason="Need to fix this test to use a local website instead of a public one.") @pytest.mark.asyncio async def test_web_surfer() -> None: - env = { - ENVIRON_KEY_CHAT_COMPLETION_PROVIDER: "openai", - ENVIRON_KEY_CHAT_COMPLETION_KWARGS_JSON: MOCK_CHAT_COMPLETION_KWARGS, - } - runtime = SingleThreadedAgentRuntime() # Create an appropriate client - client = create_completion_client_from_env(env) + config = {"provider": "OpenAIChatCompletionClient", "config": json.loads(MOCK_CHAT_COMPLETION_KWARGS)} + client = ChatCompletionClient.load_component(config) # Register agents. @@ -183,7 +176,7 @@ async def test_web_surfer_oai() -> None: runtime = SingleThreadedAgentRuntime() # Create an appropriate client - client = create_completion_client_from_env() + client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"])) # Register agents. 
await MultimodalWebSurfer.register( @@ -247,14 +240,10 @@ async def test_web_surfer_oai() -> None: ) @pytest.mark.asyncio async def test_web_surfer_bing() -> None: - env = { - ENVIRON_KEY_CHAT_COMPLETION_PROVIDER: "openai", - ENVIRON_KEY_CHAT_COMPLETION_KWARGS_JSON: MOCK_CHAT_COMPLETION_KWARGS, - } - runtime = SingleThreadedAgentRuntime() # Create an appropriate client - client = create_completion_client_from_env(env) + config = {"provider": "OpenAIChatCompletionClient", "config": json.loads(MOCK_CHAT_COMPLETION_KWARGS)} + client = ChatCompletionClient.load_component(config) # Register agents. await MultimodalWebSurfer.register( From 50b1721d159a3eadf75618a39cde917d6f4da837 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Wed, 8 Jan 2025 09:38:08 -0500 Subject: [PATCH 24/61] Only check diffs not project for codecov (#4922) * Only check diffs not project * disable pr comment * Update codecov.yml to remove comments --- codecov.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 codecov.yml diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 000000000000..1c76b01a147d --- /dev/null +++ b/codecov.yml @@ -0,0 +1,3 @@ +coverage: + status: + project: off From b06ff9d5d6cfca2f21e6982e37ad97600731f52d Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 8 Jan 2025 09:48:37 -0800 Subject: [PATCH 25/61] Fix agent and agent runtime in Core doc (#4943) --- .../framework/agent-and-agent-runtime.ipynb | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb index f1de10bf494d..b704944b835b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb @@ 
-51,8 +51,9 @@ "## Implementing an Agent\n", "\n", "To implement an agent, the developer must subclass the {py:class}`~autogen_core.RoutedAgent` class\n", - "and implement the {py:meth}`~autogen_core.RoutedAgent.on_message_impl` method.\n", - "This method is invoked when the agent receives a message. For example,\n", + "and implement a message handler method for each message type the agent is expected to handle using\n", + "the {py:meth}`~autogen_core.message_handler` decorator.\n", + "For example,\n", "the following agent handles a simple message type `MyMessageType` and prints the message it receives:" ] }, @@ -85,7 +86,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This agent only handles `MyMessageType` and messages will be delivered to `handle_my_message_type` method. Developers can have multiple message handlers for different message types by using `@message_handler` decorator and setting the type hint for the `message` variable in the handler function. You can also leverage [python typing union](https://docs.python.org/3/library/typing.html#typing.Union) for the `message` variable in one message handler function if it better suits agent's logic.\n", + "This agent only handles `MyMessageType` and messages will be delivered to `handle_my_message_type` method. Developers can have multiple message handlers for different message types by using {py:meth}`~autogen_core.message_handler` decorator and setting the type hint for the `message` variable in the handler function. You can also leverage [python typing union](https://docs.python.org/3/library/typing.html#typing.Union) for the `message` variable in one message handler function if it better suits agent's logic.\n", "See the next section on [message and communication](./message-and-communication.ipynb)." 
] }, @@ -192,17 +193,17 @@ "source": [ "## Running the Single-Threaded Agent Runtime\n", "\n", - "The above code snippet uses `runtime.start()` to start a background task\n", + "The above code snippet uses {py:meth}`~autogen_core.SingleThreadedAgentRuntime.start` to start a background task\n", "to process and deliver messages to recepients' message handlers.\n", "This is a feature of the\n", "local embedded runtime {py:class}`~autogen_core.SingleThreadedAgentRuntime`.\n", "\n", - "To stop the background task immediately, use the `stop()` method:" + "To stop the background task immediately, use the {py:meth}`~autogen_core.SingleThreadedAgentRuntime.stop` method:" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -216,18 +217,18 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can resume the background task by calling `start()` again.\n", + "You can resume the background task by calling {py:meth}`~autogen_core.SingleThreadedAgentRuntime.start` again.\n", "\n", "For batch scenarios such as running benchmarks for evaluating agents,\n", "you may want to wait for the background task to stop automatically when\n", "there are no unprocessed messages and no agent is handling messages --\n", "the batch may considered complete.\n", - "You can achieve this by using the `stop_when_idle()` method:" + "You can achieve this by using the {py:meth}`~autogen_core.SingleThreadedAgentRuntime.stop_when_idle` method:" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -240,16 +241,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can also directly process messages one-by-one without a background task using:" + "To close the runtime and release resources, use the {py:meth}`~autogen_core.SingleThreadedAgentRuntime.close` method:" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": 
{}, "outputs": [], "source": [ - "await runtime.process_next()" + "await runtime.close()" ] }, { @@ -276,7 +277,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.11" + "version": "3.11.5" } }, "nbformat": 4, From 9ff1ddae5911fae59d6f203abc9cd5c51acb6444 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Wed, 8 Jan 2025 12:59:05 -0500 Subject: [PATCH 26/61] Update cookiecutter instructions (#4945) --- python/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/README.md b/python/README.md index f17a7288d924..53859a0132b7 100644 --- a/python/README.md +++ b/python/README.md @@ -61,7 +61,7 @@ Alternatively, you can run all the checks with: To create a new package, similar to `autogen-core` or `autogen-chat`, use the following: ```sh -uv sync +uv sync --python 3.12 source .venv/bin/activate cookiecutter ./templates/new-package/ ``` From 08addac2d42e1a076950ae99f321fd43e61e2e52 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Wed, 8 Jan 2025 13:11:23 -0500 Subject: [PATCH 27/61] Remove wording override of switcher (#4939) --- .../src/_static/override-switcher-button.js | 43 ------------------- python/packages/autogen-core/docs/src/conf.py | 2 +- 2 files changed, 1 insertion(+), 44 deletions(-) delete mode 100644 python/packages/autogen-core/docs/src/_static/override-switcher-button.js diff --git a/python/packages/autogen-core/docs/src/_static/override-switcher-button.js b/python/packages/autogen-core/docs/src/_static/override-switcher-button.js deleted file mode 100644 index 3d74310b6d13..000000000000 --- a/python/packages/autogen-core/docs/src/_static/override-switcher-button.js +++ /dev/null @@ -1,43 +0,0 @@ -// When body is ready -document.addEventListener('DOMContentLoaded', async function() { - - const styles = ` - #bd-header-version-warning { - display: none !important; - } - `; - - // Fetch version list - // 
https://raw.githubusercontent.com/microsoft/autogen/refs/heads/main/docs/switcher.json - const response = await fetch('https://raw.githubusercontent.com/microsoft/autogen/refs/heads/main/docs/switcher.json'); - const data = await response.json(); - - // Find the entry where preferred is true - const preferred = data.find(entry => entry.preferred); - if (preferred) { - // Get current rendered version - const currentVersion = DOCUMENTATION_OPTIONS.VERSION; - const urlVersionPath = DOCUMENTATION_OPTIONS.theme_switcher_version_match; - // The version compare library seems to not like the dev suffix without - so we're going to do an exact match and hide the banner if so - // For the "dev" version which is always latest we don't want to consider hiding the banner - if ((currentVersion === preferred.version) && (urlVersionPath !== "dev")) { - // Hide the banner with id bd-header-version-warning - const styleSheet = document.createElement("style"); - styleSheet.textContent = styles; - document.head.appendChild(styleSheet); - return; - } - } - - // TODO: Please find a better way to override the button text... - setTimeout(async function() { - // Get the button with class "pst-button-link-to-stable-version". There is only one. - var button = document.querySelector('.pst-button-link-to-stable-version'); - if (!button) { - // If the button is not found, return. 
- return; - } - // Set the button's text to "Switch to latest dev release" - button.textContent = "Switch to latest dev release"; - }, 500); -}); diff --git a/python/packages/autogen-core/docs/src/conf.py b/python/packages/autogen-core/docs/src/conf.py index 3b5181fc602c..7aa75ebd1acc 100644 --- a/python/packages/autogen-core/docs/src/conf.py +++ b/python/packages/autogen-core/docs/src/conf.py @@ -131,7 +131,7 @@ ] } -html_js_files = ["custom-icon.js", "override-switcher-button.js"] +html_js_files = ["custom-icon.js"] html_sidebars = { "packages/index": [], "user-guide/core-user-guide/**": ["sidebar-nav-bs-core"], From 30cbbf7cadbe014bbf6fa58931a96470515fd389 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 8 Jan 2025 10:52:33 -0800 Subject: [PATCH 28/61] Add pip install for magentic one and studio to homepage; update doc link (#4946) * Add pip install for magentic one and studio to homepage; update doc link * Update links --- python/packages/autogen-agentchat/README.md | 2 +- python/packages/autogen-core/README.md | 2 +- python/packages/autogen-core/docs/src/index.md | 11 +++++++---- python/packages/autogen-ext/README.md | 2 ++ 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/python/packages/autogen-agentchat/README.md b/python/packages/autogen-agentchat/README.md index ed698b1f3898..07fa5d8c5208 100644 --- a/python/packages/autogen-agentchat/README.md +++ b/python/packages/autogen-agentchat/README.md @@ -1,3 +1,3 @@ # AutoGen AgentChat -- [Documentation](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/index.html) +- [Documentation](https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/index.html) diff --git a/python/packages/autogen-core/README.md b/python/packages/autogen-core/README.md index 722623b2e2d5..b09e22b90a4e 100644 --- a/python/packages/autogen-core/README.md +++ b/python/packages/autogen-core/README.md @@ -1,3 +1,3 @@ # AutoGen Core -- 
[Documentation](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/index.html) +- [Documentation](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/index.html) diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index e729679e055c..0af7d1079ee3 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -54,12 +54,14 @@ A framework for building AI agents and applications
{fas}`book;pst-color-primary` -Magentic-One
+Magentic-One [![PyPi magentic-one](https://img.shields.io/badge/PyPi-magentic--one-blue?logo=pypi)](https://pypi.org/project/magentic-one/) + A multi-agent assistant for web and file-based tasks. Built on AgentChat. ```bash -% m1 "Find flights from Seattle to Paris and format the result in a table" +pip install magentic-one +m1 "Find flights from Seattle to Paris and format the result in a table" ``` +++ @@ -72,7 +74,7 @@ Get Started ::: -:::{grid-item-card} {fas}`palette;pst-color-primary` Studio [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogen--studio-blue?logo=pypi)](https://pypi.org/project/autogenstudio/) +:::{grid-item-card} {fas}`palette;pst-color-primary` Studio [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-blue?logo=pypi)](https://pypi.org/project/autogenstudio/) :shadow: none :margin: 2 0 0 0 :columns: 12 12 6 6 @@ -81,7 +83,8 @@ An app for prototyping and managing agents without writing code. Built on AgentChat. ```bash -% autogenstudio ui --port 8080 +pip install autogenstudio +autogenstudio ui --port 8080 ``` +++ diff --git a/python/packages/autogen-ext/README.md b/python/packages/autogen-ext/README.md index 2291ed9c23a7..402c411e224f 100644 --- a/python/packages/autogen-ext/README.md +++ b/python/packages/autogen-ext/README.md @@ -1 +1,3 @@ # autogen-ext + +[Documentation](https://microsoft.github.io/autogen/stable/user-guide/extensions-user-guide/index.html) From a427b38000ea5087ba7e2b725f198b0ea7c3d34e Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Wed, 8 Jan 2025 14:48:09 -0500 Subject: [PATCH 29/61] Add stable website dir, to be updated (#4948) * Add stable * stable redirect --- .github/workflows/docs.yml | 1 + python/packages/autogen-core/docs/redirects/redirect_urls.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 876b39496ded..0afb1745ba10 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ 
-33,6 +33,7 @@ jobs: [ # For main use the workflow target { ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13" }, + { ref: "${{github.ref}}", dest-dir: stable, uv-version: "0.5.13" }, { ref: "v0.4.0.dev0", dest-dir: "0.4.0.dev0", uv-version: "0.5.11" }, { ref: "v0.4.0.dev1", dest-dir: "0.4.0.dev1", uv-version: "0.5.11" }, { ref: "v0.4.0.dev2", dest-dir: "0.4.0.dev2", uv-version: "0.5.11" }, diff --git a/python/packages/autogen-core/docs/redirects/redirect_urls.txt b/python/packages/autogen-core/docs/redirects/redirect_urls.txt index 779023764a1c..6d1b0c9c7ce5 100644 --- a/python/packages/autogen-core/docs/redirects/redirect_urls.txt +++ b/python/packages/autogen-core/docs/redirects/redirect_urls.txt @@ -1,4 +1,4 @@ -/autogen/,/autogen/0.2/ +/autogen/,/autogen/stable/ /autogen/docs/Getting-Started,/autogen/0.2/docs/Getting-Started /autogen/docs/installation/,/autogen/0.2/docs/installation/ /autogen/docs/tutorial/introduction,/autogen/0.2/docs/tutorial/introduction From 7131dc945d93831069e20fc8b984a52305da791b Mon Sep 17 00:00:00 2001 From: afourney Date: Wed, 8 Jan 2025 14:05:08 -0800 Subject: [PATCH 30/61] Added m1 cli package (#4949) * Added m1 cli package * update CI, install card, deprecations * Update python/packages/magentic-one-cli/pyproject.toml * fix mypy and pyright * add package * Suppress 'ResourceWarning: unclosed socket' --------- Co-authored-by: Jack Gerrits --- .github/workflows/checks.yml | 6 ++- .github/workflows/single-python-package.yml | 1 + .../packages/autogen-core/docs/src/index.md | 2 +- python/packages/autogen-ext/pyproject.toml | 5 +- .../models/openai/_openai_client.py | 12 ++--- python/packages/magentic-one-cli/LICENSE-CODE | 21 +++++++++ python/packages/magentic-one-cli/README.md | 1 + .../packages/magentic-one-cli/pyproject.toml | 47 +++++++++++++++++++ .../src/magentic_one_cli/__init__.py | 0 .../src/magentic_one_cli/__main__.py | 3 ++ .../src/magentic_one_cli/_m1.py} | 5 +- .../src/magentic_one_cli/py.typed | 0 
python/pyproject.toml | 3 +- python/uv.lock | 21 +++++++++ 14 files changed, 112 insertions(+), 15 deletions(-) create mode 100644 python/packages/magentic-one-cli/LICENSE-CODE create mode 100644 python/packages/magentic-one-cli/README.md create mode 100644 python/packages/magentic-one-cli/pyproject.toml create mode 100644 python/packages/magentic-one-cli/src/magentic_one_cli/__init__.py create mode 100644 python/packages/magentic-one-cli/src/magentic_one_cli/__main__.py rename python/packages/{autogen-ext/src/autogen_ext/teams/magentic_one_cli.py => magentic-one-cli/src/magentic_one_cli/_m1.py} (90%) create mode 100644 python/packages/magentic-one-cli/src/magentic_one_cli/py.typed diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 347e3f7042f0..34d71e3b2996 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -58,6 +58,7 @@ jobs: "./packages/agbench", "./packages/autogen-ext", "./packages/autogen-agentchat", + "./packages/magentic-one-cli", ] steps: - uses: actions/checkout@v4 @@ -86,6 +87,7 @@ jobs: "./packages/agbench", "./packages/autogen-ext", "./packages/autogen-agentchat", + "./packages/magentic-one-cli", ] steps: - uses: actions/checkout@v4 @@ -132,7 +134,7 @@ jobs: source ${{ github.workspace }}/python/.venv/bin/activate poe --directory ${{ matrix.package }} test working-directory: ./python - + codecov: runs-on: ubuntu-latest strategy: @@ -213,7 +215,7 @@ jobs: source ${{ github.workspace }}/python/.venv/bin/activate poe --directory ${{ matrix.package }} docs-check-examples working-directory: ./python - + samples-code-check: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/single-python-package.yml b/.github/workflows/single-python-package.yml index b4657a5f5163..1c73ceb35bb0 100644 --- a/.github/workflows/single-python-package.yml +++ b/.github/workflows/single-python-package.yml @@ -14,6 +14,7 @@ on: - agbench - autogen-magentic-one - autogen-studio + - magentic-one-cli ref: description: 
'Tag to deploy' required: true diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 0af7d1079ee3..469f697f822f 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -60,7 +60,7 @@ A multi-agent assistant for web and file-based tasks. Built on AgentChat. ```bash -pip install magentic-one +pip install magentic-one-cli m1 "Find flights from Seattle to Paris and format the result in a table" ``` diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index a4e7d0d9f1e2..8b972e98b8b0 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ b/python/packages/autogen-ext/pyproject.toml @@ -18,14 +18,11 @@ dependencies = [ "autogen-core==0.4.0.dev13", ] -[project.scripts] -m1 = "autogen_ext.teams.magentic_one_cli:main" - [project.optional-dependencies] langchain = ["langchain_core~= 0.3.3"] azure = ["azure-core", "azure-identity"] docker = ["docker~=7.0"] -openai = ["openai>=1.52.2", "aiofiles"] +openai = ["openai>=1.52.2", "tiktoken>=0.8.0", "aiofiles"] file-surfer = [ "autogen-agentchat==0.4.0.dev13", "markitdown>=0.0.1a2", diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index 31db4974bbb0..0a811dacce83 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -409,14 +409,14 @@ async def create( # TODO: allow custom handling. 
# For now we raise an error if images are present and vision is not supported - if self.capabilities["vision"] is False: + if self.model_info["vision"] is False: for message in messages: if isinstance(message, UserMessage): if isinstance(message.content, list) and any(isinstance(x, Image) for x in message.content): raise ValueError("Model does not support vision and image was provided") if json_output is not None: - if self.capabilities["json_output"] is False and json_output is True: + if self.model_info["json_output"] is False and json_output is True: raise ValueError("Model does not support JSON output") if json_output is True: @@ -424,13 +424,13 @@ async def create( else: create_args["response_format"] = {"type": "text"} - if self.capabilities["json_output"] is False and json_output is True: + if self.model_info["json_output"] is False and json_output is True: raise ValueError("Model does not support JSON output") oai_messages_nested = [to_oai_type(m) for m in messages] oai_messages = [item for sublist in oai_messages_nested for item in sublist] - if self.capabilities["function_calling"] is False and len(tools) > 0: + if self.model_info["function_calling"] is False and len(tools) > 0: raise ValueError("Model does not support function calling") future: Union[Task[ParsedChatCompletion[BaseModel]], Task[ChatCompletion]] if len(tools) > 0: @@ -622,14 +622,14 @@ async def create_stream( # TODO: allow custom handling. 
# For now we raise an error if images are present and vision is not supported - if self.capabilities["vision"] is False: + if self.model_info["vision"] is False: for message in messages: if isinstance(message, UserMessage): if isinstance(message.content, list) and any(isinstance(x, Image) for x in message.content): raise ValueError("Model does not support vision and image was provided") if json_output is not None: - if self.capabilities["json_output"] is False and json_output is True: + if self.model_info["json_output"] is False and json_output is True: raise ValueError("Model does not support JSON output") if json_output is True: diff --git a/python/packages/magentic-one-cli/LICENSE-CODE b/python/packages/magentic-one-cli/LICENSE-CODE new file mode 100644 index 000000000000..9e841e7a26e4 --- /dev/null +++ b/python/packages/magentic-one-cli/LICENSE-CODE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/python/packages/magentic-one-cli/README.md b/python/packages/magentic-one-cli/README.md new file mode 100644 index 000000000000..ccc319776481 --- /dev/null +++ b/python/packages/magentic-one-cli/README.md @@ -0,0 +1 @@ +# magentic-one-cli diff --git a/python/packages/magentic-one-cli/pyproject.toml b/python/packages/magentic-one-cli/pyproject.toml new file mode 100644 index 000000000000..b5db07ed79e5 --- /dev/null +++ b/python/packages/magentic-one-cli/pyproject.toml @@ -0,0 +1,47 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "magentic-one-cli" +version = "0.1.0" +license = {file = "LICENSE-CODE"} +description = "Magentic-One is a generalist multi-agent system, built on `AutoGen-AgentChat`, for solving complex web and file-based tasks. This package installs the `m1` command-line utility to quickly get started with Magentic-One." 
+readme = "README.md" +requires-python = ">=3.10" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +dependencies = [ + "autogen-agentchat", #>=0.4.0<0.5 + "autogen-ext[openai,magentic-one]", #>=0.4.0<0.5 +] + +[project.scripts] +m1 = "magentic_one_cli._m1:main" + +[dependency-groups] +dev = [] + + +[tool.ruff] +extend = "../../pyproject.toml" +include = ["src/**", "tests/*.py"] + +[tool.pyright] +extends = "../../pyproject.toml" +include = ["src"] + +[tool.pytest.ini_options] +minversion = "6.0" +testpaths = ["tests"] + +[tool.poe] +include = "../../shared_tasks.toml" + +[tool.poe.tasks] +mypy = "mypy --config-file $POE_ROOT/../../pyproject.toml src" +test = "true" +coverage = "true" diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/__init__.py b/python/packages/magentic-one-cli/src/magentic_one_cli/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/__main__.py b/python/packages/magentic-one-cli/src/magentic_one_cli/__main__.py new file mode 100644 index 000000000000..44220c725c7b --- /dev/null +++ b/python/packages/magentic-one-cli/src/magentic_one_cli/__main__.py @@ -0,0 +1,3 @@ +from ._m1 import main + +main() diff --git a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one_cli.py b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py similarity index 90% rename from python/packages/autogen-ext/src/autogen_ext/teams/magentic_one_cli.py rename to python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py index 97abac5933b7..d2698d23f598 100644 --- a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one_cli.py +++ b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py @@ -1,11 +1,14 @@ import argparse import asyncio +import warnings from autogen_agentchat.ui import Console - from autogen_ext.models.openai import 
OpenAIChatCompletionClient from autogen_ext.teams.magentic_one import MagenticOne +# Suppress warnings about the requests.Session() not being closed +warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) + def main() -> None: """ diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/py.typed b/python/packages/magentic-one-cli/src/magentic_one_cli/py.typed new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/pyproject.toml b/python/pyproject.toml index f3b5c9453aad..56e7c1f61274 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -31,9 +31,10 @@ autogen-agentchat = { workspace = true } autogen-core = { workspace = true } autogen-ext = { workspace = true } autogen-magentic-one = { workspace = true } -autogenstudio = { workspace = true } autogen-test-utils = { workspace = true } +autogenstudio = { workspace = true } component-schema-gen = { workspace = true } +magentic-one-cli = { workspace = true } [tool.ruff] line-length = 120 diff --git a/python/uv.lock b/python/uv.lock index 9f8ed0980e56..330f570ef432 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -26,6 +26,7 @@ members = [ "autogen-test-utils", "autogenstudio", "component-schema-gen", + "magentic-one-cli", ] [manifest.dependency-groups] @@ -509,6 +510,7 @@ magentic-one = [ openai = [ { name = "aiofiles" }, { name = "openai" }, + { name = "tiktoken" }, ] video-surfer = [ { name = "autogen-agentchat" }, @@ -553,6 +555,7 @@ requires-dist = [ { name = "pillow", marker = "extra == 'web-surfer'", specifier = ">=11.0.0" }, { name = "playwright", marker = "extra == 'magentic-one'", specifier = ">=1.48.0" }, { name = "playwright", marker = "extra == 'web-surfer'", specifier = ">=1.48.0" }, + { name = "tiktoken", marker = "extra == 'openai'", specifier = ">=0.8.0" }, ] [package.metadata.requires-dev] @@ -2463,6 +2466,24 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/ba/b2/6a22fb5c0885da3b00e116aee81f0b829ec9ac8f736cd414b4a09413fc7d/lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba", size = 3487557 }, ] +[[package]] +name = "magentic-one-cli" +version = "0.1.0" +source = { editable = "packages/magentic-one-cli" } +dependencies = [ + { name = "autogen-agentchat" }, + { name = "autogen-ext", extra = ["magentic-one", "openai"] }, +] + +[package.metadata] +requires-dist = [ + { name = "autogen-agentchat", editable = "packages/autogen-agentchat" }, + { name = "autogen-ext", extras = ["openai", "magentic-one"], editable = "packages/autogen-ext" }, +] + +[package.metadata.requires-dev] +dev = [] + [[package]] name = "mako" version = "1.3.6" From 318820e5ed6b98c01b6698bf9a0c5ab5d98e966e Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 8 Jan 2025 15:18:08 -0800 Subject: [PATCH 31/61] "magentic one" --> "magentic one cli" on landing page (#4951) --- python/packages/autogen-core/docs/src/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 469f697f822f..763da49b622a 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -54,9 +54,9 @@ A framework for building AI agents and applications
{fas}`book;pst-color-primary` -Magentic-One [![PyPi magentic-one](https://img.shields.io/badge/PyPi-magentic--one-blue?logo=pypi)](https://pypi.org/project/magentic-one/) +Magentic-One CLI [![PyPi magentic-one-cli](https://img.shields.io/badge/PyPi-magentic--one--cli-blue?logo=pypi)](https://pypi.org/project/magentic-one-cli/)
-A multi-agent assistant for web and file-based tasks. +A console-based multi-agent assistant for web and file-based tasks. Built on AgentChat. ```bash From 903305e810286e82ef94846f3740861e3659ec55 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Wed, 8 Jan 2025 15:24:12 -0800 Subject: [PATCH 32/61] Add tutorial index page; improve installation pages; improve Core tutorial to mention how to use AgentChat agent in Core. (#4950) --- .../user-guide/agentchat-user-guide/index.md | 9 +- .../agentchat-user-guide/installation.md | 17 +-- .../agentchat-user-guide/tutorial/index.md | 72 ++++++++++++ .../framework/agent-and-agent-runtime.ipynb | 104 ++++++++++++++---- .../core-user-guide/installation.md | 71 ++++++++++++ 5 files changed, 235 insertions(+), 38 deletions(-) create mode 100644 python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md index 46f526c6c3aa..9754bd8ca5ed 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md @@ -31,19 +31,19 @@ How to install AgentChat Build your first agent ::: -:::{grid-item-card} {fas}`graduation-cap;pst-color-primary` Tutorial -:link: ./tutorial/models.html +:::{grid-item-card} {fas}`school;pst-color-primary` Tutorial +:link: ./tutorial/index.html Step-by-step guide to using AgentChat, learn about agents, teams, and more ::: -:::{grid-item-card} {fas}`book;pst-color-primary` Selector Group Chat +:::{grid-item-card} {fas}`sitemap;pst-color-primary` Selector Group Chat :link: ./selector-group-chat.html Multi-agent coordination through a shared context and centralized, customizable selector ::: -:::{grid-item-card} {fas}`book;pst-color-primary` Swarm +:::{grid-item-card} {fas}`dove;pst-color-primary` Swarm :link: 
./swarm.html Multi-agent coordination through a shared context and localized, tool-based selector @@ -82,6 +82,7 @@ migration-guide :hidden: :caption: Tutorial +tutorial/index tutorial/models tutorial/messages tutorial/agents diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md index ce6fecb0b2ff..b55fcccd54ca 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md @@ -38,7 +38,7 @@ deactivate Create and activate: ```bash -conda create -n autogen python=3.10 +conda create -n autogen python=3.12 conda activate autogen ``` @@ -77,15 +77,8 @@ extensions: pip install "autogen-ext[openai]==0.4.0.dev13" ``` -## Install Docker for Code Execution +If you are using Azure OpenAI with AAD authentication, you need to install the following: -We recommend using Docker for code execution. -To install Docker, follow the instructions for your operating system on the [Docker website](https://docs.docker.com/get-docker/). - -A simple example of how to use Docker for code execution is shown below: - - - -To learn more about agents that execute code, see the [agents tutorial](./tutorial/agents.ipynb). +```bash +pip install "autogen-ext[azure]==0.4.0.dev13" +``` diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md new file mode 100644 index 000000000000..abb8bc72f291 --- /dev/null +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md @@ -0,0 +1,72 @@ +--- +myst: + html_meta: + "description lang=en": | + Tutorial for AgentChat, a high-level API for AutoGen +--- + +# Introduction + +This tutorial provides a step-by-step guide to using AgentChat. 
+Make sure you have first followed the [installation instructions](../installation.md) +to prepare your environment. + +If you are stuck at any point, feel free to ask for help on +[GitHub Discussions](https://github.com/microsoft/autogen/discussions) +or [Discord](https://aka.ms/autogen-discord). + +```{note} +If you are coming from AutoGen v0.2, please read the [migration guide](../migration-guide.md). +``` + +::::{grid} 2 2 2 2 +:gutter: 3 + +:::{grid-item-card} {fas}`brain;pst-color-primary` Models +:link: ./models.html + +How to use LLM model clients +::: + +:::{grid-item-card} {fas}`envelope;pst-color-primary` Messages +:link: ./messages.html + +Understand the message types +::: + +:::{grid-item-card} {fas}`robot;pst-color-primary` Agents +:link: ./agents.html + +Work with AgentChat agents and get started with {py:class}`~autogen_agentchat.agents.AssistantAgent` +::: + +:::{grid-item-card} {fas}`sitemap;pst-color-primary` Teams +:link: ./teams.html + +Work with teams of agents and get started with {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`. 
+::: + +:::{grid-item-card} {fas}`person-chalkboard;pst-color-primary` Human-in-the-Loop +:link: ./human-in-the-loop.html + +Best practices for providing feedback to a team +::: + +:::{grid-item-card} {fas}`circle-stop;pst-color-primary` Termination +:link: ./termination.html + +Control a team using termination conditions +::: + +:::{grid-item-card} {fas}`code;pst-color-primary` Custom Agents +:link: ./custom-agents.html + +Create your own agents +::: + +:::{grid-item-card} {fas}`database;pst-color-primary` Managing State +:link: ./state.html + +Save and load agents and teams for persistent sessions +::: +:::: diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb index b704944b835b..2c1b9179b7c5 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb @@ -7,9 +7,8 @@ "# Agent and Agent Runtime\n", "\n", "In this and the following section, we focus on the core concepts of AutoGen:\n", - "agents, agent runtime, messages, and communication.\n", - "You will not find any AI models or tools here, just the foundational\n", - "building blocks for building multi-agent applications.\n", + "agents, agent runtime, messages, and communication -- \n", + "the foundational building blocks for multi-agent applications.\n", "\n", "```{note}\n", "The Core API is designed to be unopinionated and flexible. 
So at times, you\n", @@ -25,22 +24,31 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "An agent in AutoGen is an entity defined by the base class {py:class}`autogen_core.Agent`.\n", - "It has a unique identifier of the type {py:class}`autogen_core.AgentId`,\n", - "a metadata dictionary of the type {py:class}`autogen_core.AgentMetadata`,\n", + "An agent in AutoGen is an entity defined by the base interface {py:class}`~autogen_core.Agent`.\n", + "It has a unique identifier of the type {py:class}`~autogen_core.AgentId`,\n", + "a metadata dictionary of the type {py:class}`~autogen_core.AgentMetadata`.\n", "\n", - "and method for handling messages {py:meth}`autogen_core.BaseAgent.on_message_impl`. In most cases, you can subclass your agents from higher level class {py:class}`autogen_core.RoutedAgent` which enables you to route messages to corresponding message handler specified with {py:meth}`autogen_core.message_handler` decorator and proper type hint for the `message` variable.\n", + "In most cases, you can subclass your agents from higher level class {py:class}`~autogen_core.RoutedAgent` which enables you to route messages to corresponding message handler specified with {py:meth}`~autogen_core.message_handler` decorator and proper type hint for the `message` variable.\n", "An agent runtime is the execution environment for agents in AutoGen.\n", + "\n", "Similar to the runtime environment of a programming language,\n", "an agent runtime provides the necessary infrastructure to facilitate communication\n", "between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n", "debugging.\n", + "\n", "For local development, developers can use {py:class}`~autogen_core.SingleThreadedAgentRuntime`,\n", "which can be embedded in a Python application.\n", "\n", "```{note}\n", "Agents are not directly instantiated and managed by application code.\n", "Instead, they are created by the runtime when needed and managed by the runtime.\n", 
+ "\n", + "If you are already familiar with [AgentChat](../../agentchat-user-guide/index.md),\n", + "it is important to note that AgentChat's agents such as\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` are created by application \n", + "and thus not directly managed by the runtime. To use an AgentChat agent in Core,\n", + "you need to create a wrapper Core agent that delegates messages to the AgentChat agent\n", + "and let the runtime manage the wrapper agent.\n", "```" ] }, @@ -59,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -79,7 +87,7 @@ "\n", " @message_handler\n", " async def handle_my_message_type(self, message: MyMessageType, ctx: MessageContext) -> None:\n", - " print(f\"Received message: {message.content}\") # type: ignore" + " print(f\"{self.id.type} received message: {message.content}\")" ] }, { @@ -90,6 +98,55 @@ "See the next section on [message and communication](./message-and-communication.ipynb)." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using an AgentChat Agent\n", + "\n", + "If you have an [AgentChat](../../agentchat-user-guide/index.md) agent and want to use it in the Core API, you can create\n", + "a wrapper {py:class}`~autogen_core.RoutedAgent` that delegates messages to the AgentChat agent.\n", + "The following example shows how to create a wrapper agent for the {py:class}`~autogen_agentchat.agents.AssistantAgent`\n", + "in AgentChat." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import TextMessage\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "\n", + "class MyAssistant(RoutedAgent):\n", + " def __init__(self, name: str) -> None:\n", + " super().__init__(name)\n", + " model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + " self._delegate = AssistantAgent(name, model_client=model_client)\n", + "\n", + " @message_handler\n", + " async def handle_my_message_type(self, message: MyMessageType, ctx: MessageContext) -> None:\n", + " print(f\"{self.id.type} received message: {message.content}\")\n", + " response = await self._delegate.on_messages(\n", + " [TextMessage(content=message.content, source=\"user\")], ctx.cancellation_token\n", + " )\n", + " print(f\"{self.id.type} responded: {response.chat_message.content}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For how to use model client, see the [Model Client](./model-clients.ipynb) section.\n", + "\n", + "Since the Core API is unopinionated,\n", + "you are not required to use the AgentChat API to use the Core API.\n", + "You can implement your own agents or use another agent framework." + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -106,7 +163,7 @@ "when they are needed.\n", "\n", "Agent type ({py:class}`~autogen_core.AgentType`) is not the same as the agent class. 
In this example,\n", - "the agent type is `AgentType(\"my_agent\")` and the agent class is the Python class `MyAgent`.\n", + "the agent type is `AgentType(\"my_agent\")` or `AgentType(\"my_assistant\")` and the agent class is the Python class `MyAgent` or `MyAssistantAgent`.\n", "The factory function is expected to return an instance of the agent class \n", "on which the {py:meth}`~autogen_core.BaseAgent.register` class method is invoked.\n", "Read [Agent Identity and Lifecycles](../core-concepts/agent-identity-and-lifecycle.md)\n", @@ -119,23 +176,23 @@ "can be used to create different instances of the same agent class.\n", "```\n", "\n", - "To register an agent type with the \n", + "To register our agent types with the \n", "{py:class}`~autogen_core.SingleThreadedAgentRuntime`,\n", "the following code can be used:" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AgentType(type='my_agent')" + "AgentType(type='my_assistant')" ] }, - "execution_count": 2, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -144,7 +201,8 @@ "from autogen_core import SingleThreadedAgentRuntime\n", "\n", "runtime = SingleThreadedAgentRuntime()\n", - "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())" + "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())\n", + "await MyAssistant.register(runtime, \"my_assistant\", lambda: MyAssistant(\"my_assistant\"))" ] }, { @@ -159,21 +217,23 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Received message: Hello, World!\n" + "my_agent received message: Hello, World!\n", + "my_assistant received message: Hello, World!\n", + "my_assistant responded: Hello! 
How can I assist you today?\n" ] } ], "source": [ - "agent_id = AgentId(\"my_agent\", \"default\")\n", "runtime.start() # Start processing messages in the background.\n", - "await runtime.send_message(MyMessageType(\"Hello, World!\"), agent_id)\n", + "await runtime.send_message(MyMessageType(\"Hello, World!\"), AgentId(\"my_agent\", \"default\"))\n", + "await runtime.send_message(MyMessageType(\"Hello, World!\"), AgentId(\"my_assistant\", \"default\"))\n", "await runtime.stop() # Stop processing messages in the background." ] }, @@ -203,7 +263,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -228,7 +288,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -246,7 +306,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md index ff468a1775db..8b7d0dc2dfeb 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md @@ -1,5 +1,53 @@ # Installation +## Create a Virtual Environment (optional) + +When installing AgentChat locally, we recommend using a virtual environment for the installation. This will ensure that the dependencies for AgentChat are isolated from the rest of your system. + +``````{tab-set} + +`````{tab-item} venv + +Create and activate: + +```bash +python3 -m venv .venv +source .venv/bin/activate +``` + +To deactivate later, run: + +```bash +deactivate +``` + +````` + +`````{tab-item} conda + +[Install Conda](https://docs.conda.io/projects/conda/en/stable/user-guide/install/index.html) if you have not already. 
+ + +Create and activate: + +```bash +conda create -n autogen python=3.12 +conda activate autogen +``` + +To deactivate later, run: + +```bash +conda deactivate +``` + + +````` + + + +`````` + ## Install using pip Install the `autogen-core` package using pip: @@ -12,3 +60,26 @@ pip install "autogen-core==0.4.0.dev13" ```{note} Python 3.10 or later is required. ``` + +## Install OpenAI for Model Client + +To use the OpenAI and Azure OpenAI models, you need to install the following +extensions: + +```bash +pip install "autogen-ext[openai]==0.4.0.dev13" +``` + +If you are using Azure OpenAI with AAD authentication, you need to install the following: + +```bash +pip install "autogen-ext[azure]==0.4.0.dev13" +``` + +## Install Docker for Code Execution (Optional) + +We recommend using Docker to use {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` for execution of model-generated code. +To install Docker, follow the instructions for your operating system on the [Docker website](https://docs.docker.com/get-docker/). + +To learn more code execution, see [Command Line Code Executors](./framework/command-line-code-executors.ipynb) +and [Code Execution](./design-patterns/code-execution-groupchat.ipynb). 
From 79b0b6d0588fc3e1a4390e28dab45c797e51847f Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 09:35:41 -0500 Subject: [PATCH 33/61] Override linguist file classifications (#4952) --- .gitattributes | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.gitattributes b/.gitattributes index 877d0a1fb12e..eb4fe7fb047f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -87,3 +87,12 @@ makefile text eol=lf *.jpeg filter=lfs diff=lfs merge=lfs -text python/packages/autogen-magentic-one/imgs/autogen-magentic-one-example.png filter=lfs diff=lfs merge=lfs -text python/packages/autogen-magentic-one/imgs/autogen-magentic-one-agents.png filter=lfs diff=lfs merge=lfs -text + +python/packages/autogen-magentic-one/tests/browser_utils/test_files/test_blog.html linguist-vendored +python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/*.py linguist-generated +python/packages/autogen-ext/src/autogen_ext/runtimes/grpc/protos/*.pyi linguist-generated +python/packages/autogen-ext/tests/protos/*.py linguist-generated +python/packages/autogen-ext/tests/protos/*.pyi linguist-generated +docs/** linguist-documentation +python/packages/autogen-core/docs/** linguist-documentation +dotnet/website/** linguist-documentation From 02ad98bcb35b5fc84eb5462f9bff419904d7734b Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 11:02:15 -0500 Subject: [PATCH 34/61] Console async printing and optional stats (#4956) * async printing * Make stats output option --- .../src/autogen_agentchat/ui/_console.py | 61 ++++++++++--------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 6315b504977c..79d39d6add7f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -3,6 +3,7 @@ import time from 
typing import AsyncGenerator, List, Optional, TypeVar, cast +from aioconsole import aprint # type: ignore from autogen_core import Image from autogen_core.models import RequestUsage @@ -25,6 +26,7 @@ async def Console( stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], *, no_inline_images: bool = False, + output_stats: bool = True, ) -> T: """ Consumes the message stream from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` @@ -35,6 +37,7 @@ async def Console( stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render. This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False. + output_stats (bool, optional): If True, will output a summary of the messages and inline token usage info. Defaults to True. 
Returns: last_processed: A :class:`~autogen_agentchat.base.TaskResult` if the stream is from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` @@ -49,16 +52,16 @@ async def Console( async for message in stream: if isinstance(message, TaskResult): duration = time.time() - start_time - output = ( - f"{'-' * 10} Summary {'-' * 10}\n" - f"Number of messages: {len(message.messages)}\n" - f"Finish reason: {message.stop_reason}\n" - f"Total prompt tokens: {total_usage.prompt_tokens}\n" - f"Total completion tokens: {total_usage.completion_tokens}\n" - f"Duration: {duration:.2f} seconds\n" - ) - sys.stdout.write(output) - sys.stdout.flush() + if output_stats: + output = ( + f"{'-' * 10} Summary {'-' * 10}\n" + f"Number of messages: {len(message.messages)}\n" + f"Finish reason: {message.stop_reason}\n" + f"Total prompt tokens: {total_usage.prompt_tokens}\n" + f"Total completion tokens: {total_usage.completion_tokens}\n" + f"Duration: {duration:.2f} seconds\n" + ) + await aprint(output, end="") # mypy ignore last_processed = message # type: ignore @@ -68,26 +71,26 @@ async def Console( # Print final response. output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{_message_to_str(message.chat_message, render_image_iterm=render_image_iterm)}\n" if message.chat_message.models_usage: - output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n" + if output_stats: + output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n" total_usage.completion_tokens += message.chat_message.models_usage.completion_tokens total_usage.prompt_tokens += message.chat_message.models_usage.prompt_tokens - sys.stdout.write(output) - sys.stdout.flush() + await aprint(output, end="") # Print summary. 
- if message.inner_messages is not None: - num_inner_messages = len(message.inner_messages) - else: - num_inner_messages = 0 - output = ( - f"{'-' * 10} Summary {'-' * 10}\n" - f"Number of inner messages: {num_inner_messages}\n" - f"Total prompt tokens: {total_usage.prompt_tokens}\n" - f"Total completion tokens: {total_usage.completion_tokens}\n" - f"Duration: {duration:.2f} seconds\n" - ) - sys.stdout.write(output) - sys.stdout.flush() + if output_stats: + if message.inner_messages is not None: + num_inner_messages = len(message.inner_messages) + else: + num_inner_messages = 0 + output = ( + f"{'-' * 10} Summary {'-' * 10}\n" + f"Number of inner messages: {num_inner_messages}\n" + f"Total prompt tokens: {total_usage.prompt_tokens}\n" + f"Total completion tokens: {total_usage.completion_tokens}\n" + f"Duration: {duration:.2f} seconds\n" + ) + await aprint(output, end="") # mypy ignore last_processed = message # type: ignore @@ -96,11 +99,11 @@ async def Console( message = cast(AgentEvent | ChatMessage, message) # type: ignore output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message, render_image_iterm=render_image_iterm)}\n" if message.models_usage: - output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n" + if output_stats: + output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n" total_usage.completion_tokens += message.models_usage.completion_tokens total_usage.prompt_tokens += message.models_usage.prompt_tokens - sys.stdout.write(output) - sys.stdout.flush() + await aprint(output, end="") if last_processed is None: raise ValueError("No TaskResult or Response was processed.") From 3d6d661f7eeb72c9cbd5c01bd4e8fcad0245d7d3 Mon Sep 17 00:00:00 2001 From: gagb Date: Thu, 9 Jan 2025 10:04:38 -0800 Subject: [PATCH 35/61] Simplify README (#4712) * Simplify README * Update README with improved badge links 
and section titles * Enhance README with additional AutoGen Studio links and badges * Update README to change autogenstudio badge color to purple * Update README with example of AI agents collaboratively writing a poem * Add Examples section to README with link to examples * Add asyncio import to Minimal Python Example in README * Update README with example of multi-agent system for plotting stock prices * Add Quick Start section to README with installation instructions * Update README to reflect upcoming features with placeholders for installation, quickstart, tutorial, API reference, and packages * Update Tutorial link in README to include additional resource * Update installation link in README to point to the correct user guide * Add landing image to README and enhance visual appeal * Update installation link in README for Autogen Studio user guide * Update README.md Co-authored-by: Jack Gerrits * Update README.md Co-authored-by: Jack Gerrits * Update Studio link in README to point to the correct GitHub directory * Update README.md Co-authored-by: Jack Gerrits * Add migration guide reference for upgrading from AutoGen v0.2 in README * Fix Studio link in README to point to the correct directory * Update README to include links for Core API, AgentChat API, and Extensions API * Update README.md Co-authored-by: Eric Zhu * Update README.md Co-authored-by: Eric Zhu * Fix AutoGen Studio link in README for accurate navigation * Replace PyPi badges with a Documentation badge in README * Update README.md Co-authored-by: Eric Zhu * Update README.md Co-authored-by: Eric Zhu * Update README.md Co-authored-by: Eric Zhu * Improve README.md: clarify installation instructions, enhance descriptions of AutoGen features, and format content for better readability. 
* Update README.md: add AutoGen Bench section for benchmarking agent performance * Update README.md: clarify AutoGen framework description and add developer tools section * Update README.md: enhance AutoGen framework description and clarify cross-language support * Update README.md: clarify AgentChat API description and its relation to Core API * Update README.md: refine descriptions of AutoGen framework and ecosystem, enhancing clarity and readability * Update README.md: rename "Quick Start" section to "Installation" and enhance developer tools descriptions * Update readme * Update example * Update quickstart --------- Co-authored-by: Jack Gerrits Co-authored-by: Eric Zhu --- FAQ.md | 92 +++++++++++ README.md | 380 ++++++++------------------------------------ autogen-landing.jpg | 3 + 3 files changed, 162 insertions(+), 313 deletions(-) create mode 100644 FAQ.md create mode 100644 autogen-landing.jpg diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 000000000000..fdc0f959428a --- /dev/null +++ b/FAQ.md @@ -0,0 +1,92 @@ +## AutoGen FAQs + +### What is AutoGen 0.4? + +AutoGen v0.4 is a rewrite of AutoGen from the ground up to create a more robust, +scalable, easier to use, cross-language library for building AI Agents. +Some key features include asynchronous messaging, support for scalable distributed agents, +modular extensible design (bring your own agents, implement behaviors however you like), +cross-language support, improved observability, and full typing integration. +It is a breaking change. + +### Why these changes? + +We listened to our AutoGen users, learned from what was working, and adapted to fix what wasn't. +We brought together wide-ranging teams working on many different types of AI Agents +and collaborated to design an improved framework with a more flexible +programming model and better scalability. + +### Is this project still maintained? 
+ +We want to reaffirm our commitment to supporting both the original version of AutoGen (0.2) and the redesign (0.4). AutoGen 0.4 is still work-in-progress, and we shared the code now to build with the community. There are no plans to deprecate the original AutoGen anytime soon, and both versions will be actively maintained. + +### Who should use 0.4? + +This code is still experimental, so expect changes and bugs while we work towards a stable 0.4 release. We encourage early adopters to +try it out, give us feedback, and contribute. +For those looking for a stable version we recommend continuing to use 0.2. + +### I'm using AutoGen 0.2, should I upgrade? + +If you consider yourself an early adopter, you are comfortable making some +changes to your code, and are willing to try it out, then yes. + +### How do I still use AutoGen 0.2? + +AutoGen 0.2 can be installed with: + +```sh +pip install autogen-agentchat~=0.2 +``` + +### Will AutoGen Studio be supported in 0.4? + +Yes, this is on the [roadmap](#roadmap). +Our current plan is to enable an implementation of AutoGen Studio +on the AgentChat high level API which implements a set of agent functionalities +(agents, teams, etc). + +### How do I migrate? + +For users familiar with AutoGen, the AgentChat library in 0.4 provides similar concepts. +We are working on a migration guide. + +### Is 0.4 done? + +We are still actively developing AutoGen 0.4. One exciting new feature is the emergence of new SDKs for .NET. The python SDKs are further ahead at this time but our goal is to achieve parity. We aim to add additional languages in future releases. + +### What is happening next? When will this release be ready? + +We are still working on improving the documentation, samples, and enhancing the code. We are hoping to release before the end of the year when things are ready. + +### What is the history of this project? 
+ +The rearchitecture of the framework started with multiple Microsoft teams coming together +to address the gaps and learnings from AutoGen 0.2 - merging ideas from several predecessor projects. +The team worked on this internally for some time to ensure alignment before moving work back to the open in October 2024. + +### What is the official channel for support? + +Use GitHub [Issues](https://github.com/microsoft/autogen/issues) for bug reports and feature requests. +Use GitHub [Discussions](https://github.com/microsoft/autogen/discussions) for general questions and discussions. + +### Do you use Discord for communications? + +We are unable to use the old Discord for project discussions, as many of the maintainers no longer have viewing or posting rights there. Therefore, we request that all discussions take place on [GitHub](https://github.com/microsoft/autogen) or the [new discord server](https://aka.ms/autogen-discord). + +### What about forks? + +https://github.com/microsoft/autogen remains the only official repo for development and support of AutoGen. +We are aware that there are thousands of forks of AutoGen, including many for personal development and startups building with or on top of the library. We are not involved with any of these forks and are not aware of any plans related to them. + +### What is the status of the license and open source? + +Our project remains fully open-source and accessible to everyone. We understand that some forks use different licenses to align with different interests. We will continue to use the most permissive license (MIT) for the project. + +### Can you clarify the current state of the packages? + +Currently, we are unable to make releases to the `pyautogen` package via Pypi due to a change to package ownership that was done without our involvement. Additionally, we are moving to using multiple packages to align with the new design. Please see details [here](https://microsoft.github.io/autogen/dev/packages/index.html). + +### Can I still be involved? 
+ +We are grateful to all the contributors to AutoGen 0.2 and we look forward to continuing to collaborate with everyone in the AutoGen community. diff --git a/README.md b/README.md index 883dff1ccea4..f2911f4950ec 100644 --- a/README.md +++ b/README.md @@ -3,361 +3,115 @@
AutoGen Logo -[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40pyautogen)](https://twitter.com/pyautogen) [![LinkedIn](https://img.shields.io/badge/LinkedIn-Company?style=flat&logo=linkedin&logoColor=white)](https://www.linkedin.com/company/105812540) [![Discord](https://img.shields.io/badge/discord-chat-green?logo=discord)](https://aka.ms/autogen-discord) [![GitHub Discussions](https://img.shields.io/badge/Discussions-Q%26A-green?logo=github)](https://github.com/microsoft/autogen/discussions) [![0.2 Docs](https://img.shields.io/badge/Docs-0.2-blue)](https://microsoft.github.io/autogen/0.2/) [![0.4 Docs](https://img.shields.io/badge/Docs-0.4-blue)](https://microsoft.github.io/autogen/dev/) - -[![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev13/) [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev13/) [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev13/) +[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40pyautogen)](https://twitter.com/pyautogen) +[![LinkedIn](https://img.shields.io/badge/LinkedIn-Company?style=flat&logo=linkedin&logoColor=white)](https://www.linkedin.com/company/105812540) +[![Discord](https://img.shields.io/badge/discord-chat-green?logo=discord)](https://aka.ms/autogen-discord) +[![Documentation](https://img.shields.io/badge/Documentation-AutoGen-blue?logo=read-the-docs)](https://microsoft.github.io/autogen/)
# AutoGen -> [!IMPORTANT] -> -> - (12/19/24) Hello! -The majority of the AutoGen Team members will be resting and recharging with family and friends over the holiday period. Activity/responses on the project may be delayed during the period of Dec 20-Jan 06. We will be excited to engage with you in the new year! -> - (12/11/24) We have created a new Discord server for the AutoGen community. Join us at [aka.ms/autogen-discord](https://aka.ms/autogen-discord). -> - (11/14/24) āš ļø In response to a number of asks to clarify and distinguish between official AutoGen and its forks that created confusion, we issued a [clarification statement](https://github.com/microsoft/autogen/discussions/4217). -> - (10/13/24) Interested in the standard AutoGen as a prior user? Find it at the actively-maintained *AutoGen* [0.2 branch](https://github.com/microsoft/autogen/tree/0.2) and `autogen-agentchat~=0.2` PyPi package. -> - (10/02/24) [AutoGen 0.4](https://microsoft.github.io/autogen/dev) is a from-the-ground-up rewrite of AutoGen. Learn more about the history, goals and future at [this blog post](https://microsoft.github.io/autogen/blog). Weā€™re excited to work with the community to gather feedback, refine, and improve the project before we officially release 0.4. This is a big change, so AutoGen 0.2 is still available, maintained, and developed in the [0.2 branch](https://github.com/microsoft/autogen/tree/0.2). -> - *[Join us for Community Office Hours](https://github.com/microsoft/autogen/discussions/4059)* We will host a weekly open discussion to answer questions, talk about Roadmap, etc. - -AutoGen is an open-source framework for building AI agent systems. -It simplifies the creation of event-driven, distributed, scalable, and resilient agentic applications. -It allows you to quickly build systems where AI agents collaborate and perform tasks autonomously -or with human oversight. 
- -- [Key Features](#key-features) -- [API Layering](#api-layering) -- [Quickstart](#quickstart) -- [Roadmap](#roadmap) -- [FAQs](#faqs) - -AutoGen streamlines AI development and research, enabling the use of multiple large language models (LLMs), integrated tools, and advanced multi-agent design patterns. You can develop and test your agent systems locally, then deploy to a distributed cloud environment as your needs grow. - -## Key Features - -AutoGen offers the following key features: - -- **Asynchronous Messaging**: Agents communicate via asynchronous messages, supporting both event-driven and request/response interaction patterns. -- **Full type support**: use types in all interfaces and enforced type check on build, with a focus on quality and cohesiveness -- **Scalable & Distributed**: Design complex, distributed agent networks that can operate across organizational boundaries. -- **Modular & Extensible**: Customize your system with pluggable components: custom agents, tools, memory, and models. -- **Cross-Language Support**: Interoperate agents across different programming languages. Currently supports Python and .NET, with more languages coming soon. -- **Observability & Debugging**: Built-in features and tools for tracking, tracing, and debugging agent interactions and workflows, including support for industry standard observability with OpenTelemetry - -

- - ā†‘ Back to Top ā†‘ - -

- -# API Layering - -AutoGen has several packages and is built upon a layered architecture. -Currently, there are three main APIs your application can target: - -- [Core](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/index.html) -- [AgentChat](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/index.html) -- [Extensions](https://microsoft.github.io/autogen/dev/user-guide/extensions-user-guide/index.html) - -## Core - -- [Installation](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/installation.html) -- [Quickstart](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/quickstart.html) - -The core API of AutoGen, `autogen-core`, is built following the -[actor model](https://en.wikipedia.org/wiki/Actor_model). -It supports asynchronous message passing between agents and event-based workflows. -Agents in the core layer handle and produce typed messages, using either direct messaging, -which functions like RPC, or via broadcasting to topics, which is pub-sub. -Agents can be distributed and implemented in different programming languages, -while still communicating with one another. -**Start here if you are building scalable, event-driven agentic systems.** - -## AgentChat - -- [Installation](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/installation.html) -- [Quickstart](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/quickstart.html) - -The AgentChat API, `autogen-agentchat`, is task driven and at a high level like AutoGen 0.2. -It allows you to define conversational agents, compose them into teams and then -use them to solve tasks. -AgentChat itself is built on the core layer, but it abstracts away much of its -low-level system concepts. -If your workflows don't fit into the AgentChat API, target core instead. 
-**Start here if you just want to focus on quickly getting started with multi-agents workflows.** - -## Extensions - -The extension package `autogen-ext` contains implementations of the core interfaces using 3rd party systems, -such as OpenAI model client and Azure code executors. -Besides the built-in extensions, the package accommodates community-contributed -extensions through namespace sub-packages. -We look forward to your contributions! +**AutoGen** is a framework for creating multi-agent AI applications that can act autonomously or work alongside humans. -

- - ā†‘ Back to Top ā†‘ - -

- -## Quickstart - -### Python (AgentChat) - -First install the packages: +## Installation ```bash -pip install "autogen-agentchat==0.4.0.dev13" "autogen-ext[openai]==0.4.0.dev13" +# Install AgentChat and OpenAI client from Extensions +pip install "autogen-agentchat" "autogen-ext[openai]" ``` -The following code uses OpenAI's GPT-4o model and you need to provide your -API key to run. -To use Azure OpenAI models, follow the instruction -[here](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.html). +The current stable version is v0.4. If you are upgrading from AutoGen v0.2, please refer to the [Migration Guide](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/migration-guide.html) for detailed instructions on how to update your code and configurations. + +### Hello World + +Create an assistant agent using OpenAI's GPT-4o model. ```python import asyncio from autogen_agentchat.agents import AssistantAgent -from autogen_agentchat.ui import Console -from autogen_agentchat.conditions import TextMentionTermination -from autogen_agentchat.teams import RoundRobinGroupChat from autogen_ext.models.openai import OpenAIChatCompletionClient -# Define a tool -async def get_weather(city: str) -> str: - return f"The weather in {city} is 73 degrees and Sunny." 
- async def main() -> None: - # Define an agent - weather_agent = AssistantAgent( - name="weather_agent", - model_client=OpenAIChatCompletionClient( - model="gpt-4o-2024-08-06", - # api_key="YOUR_API_KEY", - ), - tools=[get_weather], - ) - - # Define termination condition - termination = TextMentionTermination("TERMINATE") - - # Define a team - agent_team = RoundRobinGroupChat([weather_agent], termination_condition=termination) - - # Run the team and stream messages to the console - stream = agent_team.run_stream(task="What is the weather in New York?") - await Console(stream) + agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o")) + print(agent.run(task="Say 'Hello World!'")) asyncio.run(main()) ``` -### C\# - -The .NET SDK does not yet support all of the interfaces that the python SDK offers but we are working on bringing them to parity. -To use the .NET SDK, you need to add a package reference to the src in your project. -We will release nuget packages soon and will update these instructions when that happens. 
- -``` -git clone https://github.com/microsoft/autogen.git -cd autogen -# Switch to the branch that has this code -git switch staging-dev -# Build the project -cd dotnet && dotnet build AutoGen.sln -# In your source code, add AutoGen to your project -dotnet add reference /dotnet/src/Microsoft.AutoGen/Core/Microsoft.AutoGen.Core.csproj -``` - -Then, define and run your first agent: - -```csharp -using Microsoft.AutoGen.Contracts; -using Microsoft.AutoGen.Core; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Hosting; - -// send a message to the agent -var app = await App.PublishMessageAsync("HelloAgents", new NewMessageReceived -{ - Message = "World" -}, local: true); - -await App.RuntimeApp!.WaitForShutdownAsync(); -await app.WaitForShutdownAsync(); - -[TopicSubscription("agents")] -public class HelloAgent( - IAgentContext worker, - [FromKeyedServices("EventTypes")] EventTypes typeRegistry) : ConsoleAgent( - worker, - typeRegistry), - ISayHello, - IHandle, - IHandle -{ - public async Task Handle(NewMessageReceived item) - { - var response = await SayHello(item.Message).ConfigureAwait(false); - var evt = new Output - { - Message = response - }.ToCloudEvent(this.AgentId.Key); - await PublishEventAsync(evt).ConfigureAwait(false); - var goodbye = new ConversationClosed - { - UserId = this.AgentId.Key, - UserMessage = "Goodbye" - }.ToCloudEvent(this.AgentId.Key); - await PublishEventAsync(goodbye).ConfigureAwait(false); - } - public async Task Handle(ConversationClosed item) - { - var goodbye = $"********************* {item.UserId} said {item.UserMessage} ************************"; - var evt = new Output - { - Message = goodbye - }.ToCloudEvent(this.AgentId.Key); - await PublishEventAsync(evt).ConfigureAwait(false); - await Task.Delay(60000); - await App.ShutdownAsync(); - } - public async Task SayHello(string ask) - { - var response = $"\n\n\n\n***************Hello {ask}**********************\n\n\n\n"; - return response; - } -} -public 
interface ISayHello -{ - public Task SayHello(string ask); -} -``` - -```bash -dotnet run -``` - -

- - ā†‘ Back to Top ā†‘ - -

- -## Roadmap - -- AutoGen 0.2 - This is the current stable release of AutoGen. We will continue to accept bug fixes and minor enhancements to this version. -- AutoGen 0.4 - This is the first release of the new architecture. This release is still in *preview*. We will be focusing on the stability of the interfaces, documentation, tutorials, samples, and a collection of built-in agents which you can use. We are excited to work with our community to define the future of AutoGen. We are looking for feedback and contributions to help shape the future of this project. Here are some major planned items: - - More programming languages (e.g., TypeScript) - - More built-in agents and multi-agent workflows - - Deployment of distributed agents - - Re-implementation/migration of AutoGen Studio - - Integration with other agent frameworks and data sources - - Advanced RAG techniques and memory services - -

- - ā†‘ Back to Top ā†‘ - -

- -## FAQs - -### What is AutoGen 0.4? - -AutoGen v0.4 is a rewrite of AutoGen from the ground up to create a more robust, -scalable, easier to use, cross-language library for building AI Agents. -Some key features include asynchronous messaging, support for scalable distributed agents, -modular extensible design (bring your own agents, implement behaviors however you like), -cross-language support, improved observability, and full typing integration. -It is a breaking change. - -### Why these changes? - -We listened to our AutoGen users, learned from what was working, and adapted to fix what wasn't. -We brought together wide-ranging teams working on many different types of AI Agents -and collaborated to design an improved framework with a more flexible -programming model and better scalability. +### Team -### Is this project still maintained? +Create a group chat team with an assistant agent, a web surfer agent, and a user proxy agent +for web browsing tasks. You need to install [playwright](https://playwright.dev/python/docs/library). -We want to reaffirm our commitment to supporting both the original version of AutoGen (0.2) and the redesign (0.4) . AutoGen 0.4 is still work-in-progress, and we shared the code now to build with the community. There are no plans to deprecate the original AutoGen anytime soon, and both versions will be actively maintained. - -### Who should use it 0.4? - -This code is still experimental, so expect changes and bugs while we work towards a stable 0.4 release. We encourage early adopters to -try it out, give us feedback, and contribute. -For those looking for a stable version we recommend to continue using 0.2 - -### I'm using AutoGen 0.2, should I upgrade? - -If you consider yourself an early adopter, you are comfortable making some -changes to your code, and are willing to try it out, then yes. - -### How do I still use AutoGen 0.2? 
+```python +# pip install autogen-agentchat autogen-ext[openai,web-surfer] +# playwright install +import asyncio +from autogen_agentchat.agents import AssistantAgent, UserProxyAgent +from autogen_agentchat.conditions import TextMentionTermination +from autogen_agentchat.teams import RoundRobinGroupChat +from autogen_agentchat.ui import Console +from autogen_ext.models.openai import OpenAIChatCompletionClient +from autogen_ext.agents.web_surfer import MultimodalWebSurfer -AutoGen 0.2 can be installed with: +async def main() -> None: + model_client = OpenAIChatCompletionClient(model="gpt-4o") + assistant = AssistantAgent("assistant", model_client) + web_surfer = MultimodalWebSurfer("web_surfer", model_client) + user_proxy = UserProxyAgent("user_proxy") + termination = TextMentionTermination("exit") # Type 'exit' to end the conversation. + team = RoundRobinGroupChat([web_surfer, assistant, user_proxy], termination_condition=termination) + await Console(team.run_stream(task="Find information about AutoGen and write a short summary.")) -```sh -pip install autogen-agentchat~=0.2 +asyncio.run(main()) ``` -### Will AutoGen Studio be supported in 0.4? - -Yes, this is on the [roadmap](#roadmap). -Our current plan is to enable an implementation of AutoGen Studio -on the AgentChat high level API which implements a set of agent functionalities -(agents, teams, etc). - -### How do I migrate? - -For users familiar with AutoGen, the AgentChat library in 0.4 provides similar concepts. -We are working on a migration guide. - -### Is 0.4 done? - -We are still actively developing AutoGen 0.4. One exciting new feature is the emergence of new SDKs for .NET. The python SDKs are further ahead at this time but our goal is to achieve parity. We aim to add additional languages in future releases. +## Why Use AutoGen? -### What is happening next? When will this release be ready? - -We are still working on improving the documentation, samples, and enhancing the code. 
We are hoping to release before the end of the year when things are ready. +
+ AutoGen Landing +
-### What is the history of this project?

The AutoGen ecosystem provides everything you need to create AI agents, especially multi-agent workflows -- framework, developer tools, and applications.

-The rearchitecture of the framework started with multiple Microsoft teams coming together
-to address the gaps and learnings from AutoGen 0.2 - merging ideas from several predecessor projects.
-The team worked on this internally for some time to ensure alignment before moving work back to the open in October 2024.
+The *framework* uses a layered and extensible design. Layers have clearly divided responsibilities and build on top of layers below. This design enables you to use the framework at different levels of abstraction, from high-level APIs to low-level components.

-### What is the official channel for support?
+- [Core API](./python/packages/autogen-core/) implements message passing, event-driven agents, and local and distributed runtime for flexibility and power. It also supports cross-language interoperability for .NET and Python.
+- [AgentChat API](./python/packages/autogen-agentchat/) implements a simpler but opinionated API for rapid prototyping. This API is built on top of the Core API and is closest to what users of v0.2 are familiar with and supports familiar multi-agent patterns such as two-agent chat or group chats.
+- [Extensions API](./python/packages/autogen-ext/) enables first- and third-party extensions that continuously expand framework capabilities. It supports specific implementations of LLM clients (e.g., OpenAI, AzureOpenAI), and capabilities such as code execution.

-Use GitHub [Issues](https://github.com/microsoft/autogen/issues) for bug reports and feature requests.
-Use GitHub [Discussions](https://github.com/microsoft/autogen/discussions) for general questions and discussions.
+The ecosystem also supports two essential *developer tools*:

-### Do you use Discord for communications?

+<div align="center">
+ AutoGen Studio Screenshot +
-We are unable to use the old Discord for project discussions, many of the maintainers no longer have viewing or posting rights there. Therefore, we request that all discussions take place on  or the [new discord server](https://aka.ms/autogen-discord).

+- [AutoGen Studio](./python/packages/autogen-studio/) provides a no-code GUI for building multi-agent applications.
+- [AutoGen Bench](./python/packages/agbench/) provides a benchmarking suite for evaluating agent performance.

-### What about forks?

+You can use the AutoGen framework and developer tools to create applications for your domain. For example, [Magentic-One](./python/packages/magentic-one-cli/) is a state-of-the-art multi-agent team built using AgentChat API and Extensions API that can handle a variety of tasks that require web browsing, code execution, and file handling.

- remains the only official repo for development and support of AutoGen.
-We are aware that there are thousands of forks of AutoGen, including many for personal development and startups building with or on top of the library. We are not involved with any of these forks and are not aware of any plans related to them.

+With AutoGen you get to join and contribute to a thriving ecosystem. We host weekly office hours and talks with maintainers and the community. We also have a [Discord server](https://aka.ms/autogen-discord) for real-time chat, GitHub Discussions for Q&A, and a blog for tutorials and updates.

-### What is the status of the license and open source?

## Where to go next?

-Our project remains fully open-source and accessible to everyone. We understand that some forks use different licenses to align with different interests. We will continue to use the most permissive license (MIT) for the project.

+<div align="center">
-### Can you clarify the current state of the packages? +| | [![Python](https://img.shields.io/badge/AutoGen-Python-blue?logo=python&logoColor=white)](./python) | [![.NET](https://img.shields.io/badge/AutoGen-.NET-green?logo=.net&logoColor=white)](./dotnet) | [![Studio](https://img.shields.io/badge/AutoGen-Studio-purple?logo=visual-studio&logoColor=white)](./python/packages/autogen-studio) | +|----------------------|--------------------------------------------------------------------------------------------|-------------------|-------------------| +| Installation | [![Installation](https://img.shields.io/badge/Install-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/installation.html) | * | [![Install](https://img.shields.io/badge/Install-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/installation.html) | +| Quickstart | [![Quickstart](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/quickstart.html#) | * | * | +| Tutorial | [![Tutorial](https://img.shields.io/badge/Tutorial-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/tutorial/models.html) | *| * | +| API Reference | [![API](https://img.shields.io/badge/Docs-blue)](https://microsoft.github.io/autogen/dev/reference/index.html#) | * | [![API](https://img.shields.io/badge/Docs-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html) | +| Packages | [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/)
[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/)
[![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) | * | [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-purple?logo=pypi)](https://pypi.org/project/autogenstudio/) | -Currently, we are unable to make releases to the `pyautogen` package via Pypi due to a change to package ownership that was done without our involvement. Additionally, we are moving to using multiple packages to align with the new design. Please see details [here](https://microsoft.github.io/autogen/dev/packages/index.html). +
-### Can I still be involved? +**Releasing soon* -We are grateful to all the contributors to AutoGen 0.2 and we look forward to continuing to collaborate with everyone in the AutoGen community. +Interested in contributing? See [CONTRIBUTING.md](./CONTRIBUTING.md) for guidelines on how to get started. We welcome contributions of all kinds, including bug fixes, new features, and documentation improvements. Join our community and help us make AutoGen better! -

- - ā†‘ Back to Top ā†‘ - -

+Have questions? Check out our [Frequently Asked Questions (FAQ)](./FAQ.md) for answers to common queries. If you don't find what you're looking for, feel free to ask in our [GitHub Discussions](https://github.com/microsoft/autogen/discussions) or join our [Discord server](https://aka.ms/autogen-discord) for real-time support. ## Legal Notices diff --git a/autogen-landing.jpg b/autogen-landing.jpg new file mode 100644 index 000000000000..c8572e4dd060 --- /dev/null +++ b/autogen-landing.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:149a1ab7bec4917c445992c0bff2d4402cb194207a03d4bec573d74d52aac5e8 +size 269405 From b07c1662b33a96af5c44dadbfc4eaa5e188e715e Mon Sep 17 00:00:00 2001 From: afourney Date: Thu, 9 Jan 2025 10:33:56 -0800 Subject: [PATCH 36/61] Disable usage stats on m1 command. (#4960) Co-authored-by: Eric Zhu --- python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py index d2698d23f598..e5a07b164939 100644 --- a/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py +++ b/python/packages/magentic-one-cli/src/magentic_one_cli/_m1.py @@ -39,7 +39,7 @@ def main() -> None: async def run_task(task: str, hil_mode: bool) -> None: client = OpenAIChatCompletionClient(model="gpt-4o") m1 = MagenticOne(client=client, hil_mode=hil_mode) - await Console(m1.run_stream(task=task)) + await Console(m1.run_stream(task=task), output_stats=False) task = args.task[0] asyncio.run(run_task(task, not args.no_hil)) From c293b931f57a59231a789890883dffc7f7e67cfe Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 13:37:13 -0500 Subject: [PATCH 37/61] Make API reference TOC visible (#4962) Co-authored-by: Eric Zhu --- .../autogen-core/docs/src/reference/index.md | 30 ++----------------- 1 file changed, 3 insertions(+), 27 deletions(-) diff 
--git a/python/packages/autogen-core/docs/src/reference/index.md b/python/packages/autogen-core/docs/src/reference/index.md index 3f1374643931..cbd580884c8a 100644 --- a/python/packages/autogen-core/docs/src/reference/index.md +++ b/python/packages/autogen-core/docs/src/reference/index.md @@ -8,8 +8,8 @@ myst: # API Reference ```{toctree} -:hidden: :caption: AutoGen AgentChat +:maxdepth: 2 python/autogen_agentchat python/autogen_agentchat.messages @@ -22,8 +22,8 @@ python/autogen_agentchat.state ``` ```{toctree} -:hidden: :caption: AutoGen Core +:maxdepth: 2 python/autogen_core python/autogen_core.code_executor @@ -36,8 +36,8 @@ python/autogen_core.logging ``` ```{toctree} -:hidden: :caption: AutoGen Extensions +:maxdepth: 2 python/autogen_ext.agents.magentic_one python/autogen_ext.agents.openai @@ -56,27 +56,3 @@ python/autogen_ext.code_executors.docker python/autogen_ext.code_executors.azure python/autogen_ext.runtimes.grpc ``` - - From ecdf18d3f6ae406bb96859358e94edf9eee4ea06 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 13:44:13 -0500 Subject: [PATCH 38/61] Make package readmes slightly less empty (#4961) * Make package readmes slightly less empty * Update python/packages/autogen-ext/README.md --------- Co-authored-by: Eric Zhu --- python/packages/autogen-agentchat/README.md | 9 +++++++++ python/packages/autogen-core/README.md | 2 ++ python/packages/autogen-ext/README.md | 6 ++++-- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/python/packages/autogen-agentchat/README.md b/python/packages/autogen-agentchat/README.md index 07fa5d8c5208..4ada6f98280f 100644 --- a/python/packages/autogen-agentchat/README.md +++ b/python/packages/autogen-agentchat/README.md @@ -1,3 +1,12 @@ # AutoGen AgentChat - [Documentation](https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/index.html) + +AgentChat is a high-level API for building multi-agent applications. 
+It is built on top of the [`autogen-core`](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/index.html) package. +For beginner users, AgentChat is the recommended starting point. +For advanced users, [`autogen-core`](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/index.html)'s event-driven +programming model provides more flexibility and control over the underlying components. + +AgentChat provides intuitive defaults, such as **Agents** with preset +behaviors and **Teams** with predefined [multi-agent design patterns](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/design-patterns/intro.html). diff --git a/python/packages/autogen-core/README.md b/python/packages/autogen-core/README.md index b09e22b90a4e..8cebb616922c 100644 --- a/python/packages/autogen-core/README.md +++ b/python/packages/autogen-core/README.md @@ -1,3 +1,5 @@ # AutoGen Core - [Documentation](https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/index.html) + +AutoGen core offers an easy way to quickly build event-driven, distributed, scalable, resilient AI agent systems. Agents are developed by using the [Actor model](https://en.wikipedia.org/wiki/Actor_model). You can build and run your agent system locally and easily move to a distributed system in the cloud when you are ready. diff --git a/python/packages/autogen-ext/README.md b/python/packages/autogen-ext/README.md index 402c411e224f..99f3138dfff9 100644 --- a/python/packages/autogen-ext/README.md +++ b/python/packages/autogen-ext/README.md @@ -1,3 +1,5 @@ -# autogen-ext +# AutoGen Extensions -[Documentation](https://microsoft.github.io/autogen/stable/user-guide/extensions-user-guide/index.html) +- [Documentation](https://microsoft.github.io/autogen/stable/user-guide/extensions-user-guide/index.html) + +AutoGen is designed to be extensible. The `autogen-ext` package contains many different component implementations maintained by the AutoGen project. 
However, we strongly encourage others to build their own components and publish them as part of the ecosystem.

From f3ed7ae14781f834000fa5a4ac16df59b76827aa Mon Sep 17 00:00:00 2001
From: afourney
Date: Thu, 9 Jan 2025 10:57:07 -0800
Subject: [PATCH 39/61] Fixed a failure in the MagenticOne test CI (#4966)

Fixed CI
---
 .../tests/browser_utils/test_requests_markdown_browser.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/packages/autogen-magentic-one/tests/browser_utils/test_requests_markdown_browser.py b/python/packages/autogen-magentic-one/tests/browser_utils/test_requests_markdown_browser.py
index b9919abf7a8e..72310c79ba2c 100644
--- a/python/packages/autogen-magentic-one/tests/browser_utils/test_requests_markdown_browser.py
+++ b/python/packages/autogen-magentic-one/tests/browser_utils/test_requests_markdown_browser.py
@@ -11,7 +11,7 @@
 from autogen_magentic_one.markdown_browser import BingMarkdownSearch, RequestsMarkdownBrowser

 BLOG_POST_URL = "https://microsoft.github.io/autogen/0.2/blog/2023/04/21/LLM-tuning-math"
-BLOG_POST_TITLE = "Does Model and Inference Parameter Matter in LLM Applications? - A Case Study for MATH | AutoGen"
+BLOG_POST_TITLE = "Does Model and Inference Parameter Matter in LLM Applications? - A Case Study for MATH | AutoGen 0.2"
 BLOG_POST_STRING = "Large language models (LLMs) are powerful tools that can generate natural language texts for various applications, such as chatbots, summarization, translation, and more. GPT-4 is currently the state of the art LLM in the world. Is model selection irrelevant? What about inference parameters?"
BLOG_POST_FIND_ON_PAGE_QUERY = "an example where high * complex" BLOG_POST_FIND_ON_PAGE_MATCH = "an example where high cost can easily prevent a generic complex" From 0446ce924fa80e07a99b8dceac8526d695e48442 Mon Sep 17 00:00:00 2001 From: Griffin Bassman Date: Thu, 9 Jan 2025 14:05:20 -0500 Subject: [PATCH 40/61] feat: Add o1-2024-12-17 model (#4965) Co-authored-by: Jack Gerrits --- .../src/autogen_ext/models/openai/_model_info.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py index d67beb44e1d8..d1c2bdf4241a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_model_info.py @@ -5,6 +5,7 @@ # Based on: https://platform.openai.com/docs/models/continuous-model-upgrades # This is a moving target, so correctness is checked by the model value returned by openai against expected values at runtime`` _MODEL_POINTERS = { + "o1": "o1-2024-12-17", "o1-preview": "o1-preview-2024-09-12", "o1-mini": "o1-mini-2024-09-12", "gpt-4o": "gpt-4o-2024-08-06", @@ -18,6 +19,12 @@ } _MODEL_INFO: Dict[str, ModelInfo] = { + "o1-2024-12-17": { + "vision": False, + "function_calling": False, + "json_output": False, + "family": ModelFamily.O1, + }, "o1-preview-2024-09-12": { "vision": False, "function_calling": False, @@ -117,6 +124,7 @@ } _MODEL_TOKEN_LIMITS: Dict[str, int] = { + "o1-2024-12-17": 200000, "o1-preview-2024-09-12": 128000, "o1-mini-2024-09-12": 128000, "gpt-4o-2024-08-06": 128000, From 5b841e26d6284e7a3bd2a1c2ef2ce712a177c43e Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Thu, 9 Jan 2025 11:19:25 -0800 Subject: [PATCH 41/61] update landing page example (#4968) --- python/packages/autogen-core/docs/src/index.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git 
a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 763da49b622a..4c3b3b222ae6 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -112,18 +112,15 @@ A programming framework for building conversational single and multi-agent appli Built on Core. ```python -# pip install "autogen-agentchat==0.4.0.dev13" "autogen-ext[openai]==0.4.0.dev13" "yfinance" "matplotlib" +# pip install -U "autogen-agentchat" "autogen-ext[openai]" import asyncio from autogen_agentchat.agents import AssistantAgent -from autogen_agentchat.ui import Console from autogen_ext.models.openai import OpenAIChatCompletionClient -from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor -from autogen_ext.tools.code_execution import PythonCodeExecutionTool async def main() -> None: - tool = PythonCodeExecutionTool(LocalCommandLineCodeExecutor(work_dir="coding")) - agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o"), tools=[tool], reflect_on_tool_use=True) - await Console(agent.run_stream(task="Create a plot of MSFT stock prices in 2024 and save it to a file. 
Use yfinance and matplotlib."))
+    agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o"))
+    print(await agent.run(task="Say 'Hello World!'"))
+
 asyncio.run(main())
 ```

From 0122d44aa33977b2c9c1d47e5de9fc23fe41395a Mon Sep 17 00:00:00 2001
From: Jack Gerrits
Date: Thu, 9 Jan 2025 15:06:01 -0500
Subject: [PATCH 42/61] OpenAI assistant fixes (#4969)

---
 python/packages/autogen-core/pyproject.toml | 9 ++-
 .../agents/openai/_openai_assistant_agent.py | 70 ++++++++-----------
 2 files changed, 34 insertions(+), 45 deletions(-)

diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml
index 9c15908f3b50..7f06fc66d02a 100644
--- a/python/packages/autogen-core/pyproject.toml
+++ b/python/packages/autogen-core/pyproject.toml
@@ -78,17 +78,16 @@ dev = [

 [tool.ruff]
 extend = "../../pyproject.toml"
-exclude = ["build", "dist", "src/autogen_core/application/protos", "tests/protos", "samples/protos"]
-include = ["src/**", "samples/*.py", "docs/**/*.ipynb", "tests/**"]
+exclude = ["build", "dist", "src/autogen_core/application/protos", "tests/protos"]
+include = ["src/**", "docs/**/*.ipynb", "tests/**"]

 [tool.ruff.lint.per-file-ignores]
-"samples/**.py" = ["T20"]
 "docs/**.ipynb" = ["T20"]

 [tool.pyright]
 extends = "../../pyproject.toml"
-include = ["src", "tests", "samples"]
-exclude = ["src/autogen_core/application/protos", "tests/protos", "samples/protos"]
+include = ["src", "tests"]
+exclude = ["src/autogen_core/application/protos", "tests/protos"]
 reportDeprecated = true

 [tool.pytest.ini_options]
diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py
index 72c7ef568775..496b68fd3e0b 100644
--- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py
+++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py
@@ -3,7 +3,6 @@
import logging import os from typing import ( - TYPE_CHECKING, Any, AsyncGenerator, Awaitable, @@ -19,6 +18,7 @@ cast, ) +import aiofiles from autogen_agentchat import EVENT_LOGGER_NAME from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response @@ -33,50 +33,31 @@ ToolCallRequestEvent, ) from autogen_core import CancellationToken, FunctionCall +from autogen_core.models._model_client import ChatCompletionClient from autogen_core.models._types import FunctionExecutionResult from autogen_core.tools import FunctionTool, Tool -_has_openai_dependencies: bool = True -try: - import aiofiles - - from openai import NOT_GIVEN - from openai.resources.beta.threads import AsyncMessages, AsyncRuns, AsyncThreads - from openai.types.beta.code_interpreter_tool_param import CodeInterpreterToolParam - from openai.types.beta.file_search_tool_param import FileSearchToolParam - from openai.types.beta.function_tool_param import FunctionToolParam - from openai.types.shared_params.function_definition import FunctionDefinition -except ImportError: - _has_openai_dependencies = False - -if TYPE_CHECKING: - import aiofiles - - from openai import NOT_GIVEN, AsyncClient, NotGiven - from openai.pagination import AsyncCursorPage - from openai.resources.beta.threads import AsyncMessages, AsyncRuns, AsyncThreads - from openai.types import FileObject - from openai.types.beta import thread_update_params - from openai.types.beta.assistant import Assistant - from openai.types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam - from openai.types.beta.assistant_tool_param import AssistantToolParam - from openai.types.beta.code_interpreter_tool_param import CodeInterpreterToolParam - from openai.types.beta.file_search_tool_param import FileSearchToolParam - from openai.types.beta.function_tool_param import FunctionToolParam - from openai.types.beta.thread import Thread, ToolResources, ToolResourcesCodeInterpreter - from 
openai.types.beta.threads import Message, MessageDeleted, Run - from openai.types.beta.vector_store import VectorStore - from openai.types.shared_params.function_definition import FunctionDefinition +from openai import NOT_GIVEN, AsyncClient, NotGiven +from openai.pagination import AsyncCursorPage +from openai.resources.beta.threads import AsyncMessages, AsyncRuns, AsyncThreads +from openai.types import FileObject +from openai.types.beta import thread_update_params +from openai.types.beta.assistant import Assistant +from openai.types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam +from openai.types.beta.assistant_tool_param import AssistantToolParam +from openai.types.beta.code_interpreter_tool_param import CodeInterpreterToolParam +from openai.types.beta.file_search_tool_param import FileSearchToolParam +from openai.types.beta.function_tool_param import FunctionToolParam +from openai.types.beta.thread import Thread, ToolResources, ToolResourcesCodeInterpreter +from openai.types.beta.threads import Message, MessageDeleted, Run +from openai.types.beta.vector_store import VectorStore +from openai.types.shared_params.function_definition import FunctionDefinition event_logger = logging.getLogger(EVENT_LOGGER_NAME) def _convert_tool_to_function_param(tool: Tool) -> "FunctionToolParam": """Convert an autogen Tool to an OpenAI Assistant function tool parameter.""" - if not _has_openai_dependencies: - raise RuntimeError( - "Missing dependecies for OpenAIAssistantAgent. Please ensure the autogen-ext package was installed with the 'openai' extra." 
- ) schema = tool.schema parameters: Dict[str, object] = {} @@ -158,10 +139,12 @@ async def example(): await assistant.on_upload_for_code_interpreter("data.csv", cancellation_token) # Get response from the assistant - _response = await assistant.on_messages( + response = await assistant.on_messages( [TextMessage(source="user", content="Analyze the data in data.csv")], cancellation_token ) + print(response) + # Clean up resources await assistant.delete_uploaded_files(cancellation_token) await assistant.delete_assistant(cancellation_token) @@ -207,9 +190,9 @@ def __init__( tool_resources: Optional["ToolResources"] = None, top_p: Optional[float] = None, ) -> None: - if not _has_openai_dependencies: - raise RuntimeError( - "Missing dependecies for OpenAIAssistantAgent. Please ensure the autogen-ext package was installed with the 'openai' extra." + if isinstance(client, ChatCompletionClient): + raise ValueError( + "Incorrect client passed to OpenAIAssistantAgent. Please use an OpenAI AsyncClient instance instead of an AutoGen ChatCompletionClient instance." 
) super().__init__(name, description) @@ -510,6 +493,8 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: async def _upload_files(self, file_paths: str | Iterable[str], cancellation_token: CancellationToken) -> List[str]: """Upload files and return their IDs.""" + await self._ensure_initialized() + if isinstance(file_paths, str): file_paths = [file_paths] @@ -531,6 +516,8 @@ async def on_upload_for_code_interpreter( self, file_paths: str | Iterable[str], cancellation_token: CancellationToken ) -> None: """Handle file uploads for the code interpreter.""" + await self._ensure_initialized() + file_ids = await self._upload_files(file_paths, cancellation_token) # Update thread with the new files @@ -596,6 +583,7 @@ async def on_upload_for_file_search( async def delete_uploaded_files(self, cancellation_token: CancellationToken) -> None: """Delete all files that were uploaded by this agent instance.""" + await self._ensure_initialized() for file_id in self._uploaded_file_ids: try: await cancellation_token.link_future(asyncio.ensure_future(self._client.files.delete(file_id=file_id))) @@ -605,6 +593,7 @@ async def delete_uploaded_files(self, cancellation_token: CancellationToken) -> async def delete_assistant(self, cancellation_token: CancellationToken) -> None: """Delete the assistant if it was created by this instance.""" + await self._ensure_initialized() if self._assistant is not None and not self._assistant_id: try: await cancellation_token.link_future( @@ -616,6 +605,7 @@ async def delete_assistant(self, cancellation_token: CancellationToken) -> None: async def delete_vector_store(self, cancellation_token: CancellationToken) -> None: """Delete the vector store if it was created by this instance.""" + await self._ensure_initialized() if self._vector_store_id is not None: try: await cancellation_token.link_future( From 99e2e39281084ec96eccfe4ddd6b39bbfbc81e7d Mon Sep 17 00:00:00 2001 From: SeryioGonzalez Date: Thu, 9 Jan 2025 21:28:50 +0100 
Subject: [PATCH 43/61] Update swarm.ipynb (#4958) Small typo Co-authored-by: Jack Gerrits --- .../docs/src/user-guide/agentchat-user-guide/swarm.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb index 99bb02710ffe..dfc08fa9e50c 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb @@ -28,7 +28,7 @@ "where agents take turn to generate a response. \n", "Similar to {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", "and {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, participant agents\n", - "broadcast their responses so all agents share the same mesasge context.\n", + "broadcast their responses so all agents share the same message context.\n", "\n", "Different from the other two group chat teams, at each turn,\n", "**the speaker agent is selected based on the most recent\n", From 7c31ee057394629a8b71d8913d5bd681d8e678fd Mon Sep 17 00:00:00 2001 From: SeryioGonzalez Date: Thu, 9 Jan 2025 21:29:13 +0100 Subject: [PATCH 44/61] Update swarm.ipynb (#4959) Small typo in docs Co-authored-by: Jack Gerrits --- .../docs/src/user-guide/agentchat-user-guide/swarm.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb index dfc08fa9e50c..1cd48486abee 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/swarm.ipynb @@ -83,7 +83,7 @@ " - For information needed from the customer, either agent can hand off to the `\"user\"`.\n", "3. 
The **Flights Refunder** processes refunds using the `refund_flight` tool when appropriate.\n", "4. If an agent hands off to the `\"user\"`, the team execution will stop and wait for the user to input a response.\n", - "5. When the user provides input, it's sent back to the team as a {py:class}`~autogen_agentchat.messages.HandaffMessage`. This message is directed to the agent that originally requested user input.\n", + "5. When the user provides input, it's sent back to the team as a {py:class}`~autogen_agentchat.messages.HandoffMessage`. This message is directed to the agent that originally requested user input.\n", "6. The process continues until the Travel Agent determines the task is complete and terminates the workflow." ] }, From c4302eecab6c34fb14e301fa2579d21aeedabf34 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:29:36 -0500 Subject: [PATCH 45/61] Fixes for azure-container-code-executor.ipynb (#4970) Fixes for azure-container-code-executor.ipynb --- .../azure-container-code-executor.ipynb | 38 +++++++++++++------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb index 7bc7ef4da275..c71ee58e9118 100644 --- a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb @@ -18,9 +18,9 @@ "\n", "Alternatively, you can use the [Azure CLI to create a session pool.](https://learn.microsoft.com/en-us/azure/container-apps/sessions-code-interpreter#create-a-session-pool-with-azure-cli)\n", "\n", - "## AzureContainerCodeExecutor\n", + "## ACADynamicSessionsCodeExecutor\n", "\n", - "The 
{py:class}`~autogen_ext.code_executor.aca_dynamic_sessions.AzureContainerCodeExecutor` class is a python code executor that creates and executes arbitrary python code on a default Serverless code interpreter session. Its interface is as follows\n", + "The {py:class}`~autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor` class is a python code executor that creates and executes arbitrary python code on a default Serverless code interpreter session. Its interface is as follows\n", "\n", "### Initialization\n", "\n", @@ -53,7 +53,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -62,8 +62,8 @@ "\n", "from anyio import open_file\n", "from autogen_core import CancellationToken\n", - "from autogen_core.components.code_executor import CodeBlock\n", - "from autogen_ext.code_executor.aca_dynamic_sessions import AzureContainerCodeExecutor\n", + "from autogen_core.code_executor import CodeBlock\n", + "from autogen_ext.code_executors.azure import ACADynamicSessionsCodeExecutor\n", "from azure.identity import DefaultAzureCredential" ] }, @@ -84,7 +84,7 @@ "POOL_MANAGEMENT_ENDPOINT = \"...\"\n", "\n", "with tempfile.TemporaryDirectory() as temp_dir:\n", - " executor = AzureContainerCodeExecutor(\n", + " executor = ACADynamicSessionsCodeExecutor(\n", " pool_management_endpoint=POOL_MANAGEMENT_ENDPOINT, credential=DefaultAzureCredential(), work_dir=temp_dir\n", " )\n", "\n", @@ -120,7 +120,7 @@ " assert os.path.isfile(os.path.join(temp_dir, test_file_1))\n", " assert os.path.isfile(os.path.join(temp_dir, test_file_2))\n", "\n", - " executor = AzureContainerCodeExecutor(\n", + " executor = ACADynamicSessionsCodeExecutor(\n", " pool_management_endpoint=POOL_MANAGEMENT_ENDPOINT, credential=DefaultAzureCredential(), work_dir=temp_dir\n", " )\n", " await executor.upload_files([test_file_1, test_file_2], cancellation_token)\n", @@ -168,7 +168,7 @@ " assert not os.path.isfile(os.path.join(temp_dir, 
test_file_1))\n", " assert not os.path.isfile(os.path.join(temp_dir, test_file_2))\n", "\n", - " executor = AzureContainerCodeExecutor(\n", + " executor = ACADynamicSessionsCodeExecutor(\n", " pool_management_endpoint=POOL_MANAGEMENT_ENDPOINT, credential=DefaultAzureCredential(), work_dir=temp_dir\n", " )\n", "\n", @@ -208,7 +208,7 @@ "source": [ "### New Sessions\n", "\n", - "Every instance of the {py:class}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor` class will have a unique session ID. Every call to a particular code executor will be executed on the same session until the {py:meth}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor.restart` function is called on it. Previous sessions cannot be reused.\n", + "Every instance of the {py:class}`~autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor` class will have a unique session ID. Every call to a particular code executor will be executed on the same session until the {py:meth}`~autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor.restart` function is called on it. Previous sessions cannot be reused.\n", "\n", "Here we'll run some code on the code session, restart it, then verify that a new session has been opened." ] @@ -219,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "executor = AzureContainerCodeExecutor(\n", + "executor = ACADynamicSessionsCodeExecutor(\n", " pool_management_endpoint=POOL_MANAGEMENT_ENDPOINT, credential=DefaultAzureCredential()\n", ")\n", "\n", @@ -243,7 +243,7 @@ "source": [ "### Available Packages\n", "\n", - "Each code execution instance is pre-installed with most of the commonly used packages. However, the list of available packages and versions are not available outside of the execution environment. The packages list on the environment can be retrieved by calling the {py:meth}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor.get_available_packages` function on the code executor." 
+ "Each code execution instance is pre-installed with most of the commonly used packages. However, the list of available packages and versions are not available outside of the execution environment. The packages list on the environment can be retrieved by calling the {py:meth}`~autogen_ext.code_executors.azure.ACADynamicSessionsCodeExecutor.get_available_packages` function on the code executor." ] }, { @@ -257,8 +257,22 @@ } ], "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" } }, "nbformat": 4, From c2721ff65b745da03a0aa92f7e93eea028de17d7 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:29:54 -0500 Subject: [PATCH 46/61] Update all versions to 0.4.0 (#4941) * Update all versions to 0.4.0 * update redirect * install with upgrade for agentchat --- README.md | 4 ++-- python/README.md | 2 +- python/packages/autogen-agentchat/pyproject.toml | 4 ++-- .../src/_templates/sidebar-nav-bs-agentchat.html | 2 +- .../docs/src/_templates/sidebar-nav-bs-core.html | 2 +- .../src/_templates/sidebar-nav-bs-extensions.html | 2 +- python/packages/autogen-core/docs/src/index.md | 6 +++--- .../user-guide/agentchat-user-guide/installation.md | 4 ++-- .../user-guide/agentchat-user-guide/magentic-one.md | 2 +- .../user-guide/agentchat-user-guide/quickstart.ipynb | 2 +- .../agentchat-user-guide/tutorial/models.ipynb | 4 ++-- .../framework/distributed-agent-runtime.ipynb | 4 ++-- .../src/user-guide/core-user-guide/installation.md | 2 +- .../user-guide/extensions-user-guide/installation.md | 2 +- python/packages/autogen-core/pyproject.toml | 4 ++-- python/packages/autogen-ext/pyproject.toml | 12 ++++++------ 
.../autogen_ext/agents/file_surfer/_file_surfer.py | 2 +- .../agents/openai/_openai_assistant_agent.py | 2 +- .../autogen_ext/agents/video_surfer/_video_surfer.py | 2 +- .../agents/web_surfer/_multimodal_web_surfer.py | 2 +- .../azure/_azure_container_code_executor.py | 2 +- .../code_executors/docker/_docker_code_executor.py | 2 +- .../src/autogen_ext/models/openai/__init__.py | 4 ++-- .../src/autogen_ext/models/openai/_openai_client.py | 4 ++-- .../src/autogen_ext/teams/magentic_one.py | 2 +- .../tools/code_execution/_code_execution.py | 2 +- .../packages/autogen-studio/autogenstudio/version.py | 2 +- python/packages/autogen-studio/pyproject.toml | 6 +++--- python/samples/agentchat_chainlit/requirements.txt | 2 +- .../samples/core_async_human_in_the_loop/README.md | 2 +- python/samples/core_chess_game/README.md | 2 +- python/uv.lock | 8 ++++---- 32 files changed, 52 insertions(+), 52 deletions(-) diff --git a/README.md b/README.md index f2911f4950ec..807f13077b29 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ ```bash # Install AgentChat and OpenAI client from Extensions -pip install "autogen-agentchat" "autogen-ext[openai]" +pip install -U "autogen-agentchat" "autogen-ext[openai]" ``` The current stable version is v0.4. If you are upgrading from AutoGen v0.2, please refer to the [Migration Guide](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/migration-guide.html) for detailed instructions on how to update your code and configurations. @@ -44,7 +44,7 @@ Create a group chat team with an assistant agent, a web surfer agent, and a user for web browsing tasks. You need to install [playwright](https://playwright.dev/python/docs/library). 
```python -# pip install autogen-agentchat autogen-ext[openai,web-surfer] +# pip install -U autogen-agentchat autogen-ext[openai,web-surfer] # playwright install import asyncio from autogen_agentchat.agents import AssistantAgent, UserProxyAgent diff --git a/python/README.md b/python/README.md index 53859a0132b7..4c8b53fdadbc 100644 --- a/python/README.md +++ b/python/README.md @@ -1,7 +1,7 @@ # AutoGen Python packages [![0.4 Docs](https://img.shields.io/badge/Docs-0.4-blue)](https://microsoft.github.io/autogen/dev/) -[![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev13/) [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev13/) [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev13/) +[![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/) [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/) [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) This directory works as a single `uv` workspace containing all project packages. See [`packages`](./packages/) to discover all project packages. 
diff --git a/python/packages/autogen-agentchat/pyproject.toml b/python/packages/autogen-agentchat/pyproject.toml index b8b694d9651d..2aad2d35b1b7 100644 --- a/python/packages/autogen-agentchat/pyproject.toml +++ b/python/packages/autogen-agentchat/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-agentchat" -version = "0.4.0.dev13" +version = "0.4.0" license = {file = "LICENSE-CODE"} description = "AutoGen agents and teams library" readme = "README.md" @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-core==0.4.0.dev13", + "autogen-core==0.4.0", "aioconsole>=0.8.1" ] diff --git a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-agentchat.html b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-agentchat.html index 3351908bfd67..afbaff852b8f 100644 --- a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-agentchat.html +++ b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-agentchat.html @@ -20,7 +20,7 @@
  • + href="https://pypi.org/project/autogen-agentchat/"> PyPi diff --git a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-core.html b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-core.html index d5b18efd3312..d6288526d322 100644 --- a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-core.html +++ b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-core.html @@ -19,7 +19,7 @@
  • - + PyPi diff --git a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-extensions.html b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-extensions.html index 9ae28dd1a109..b2c27fdfc811 100644 --- a/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-extensions.html +++ b/python/packages/autogen-core/docs/src/_templates/sidebar-nav-bs-extensions.html @@ -20,7 +20,7 @@
  • - + PyPi diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 4c3b3b222ae6..e62b398dce58 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -105,7 +105,7 @@ Get Started
    {fas}`people-group;pst-color-primary` AgentChat -[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev13/) +[![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/)
    A programming framework for building conversational single and multi-agent applications. @@ -136,7 +136,7 @@ Get Started ::: -:::{grid-item-card} {fas}`cube;pst-color-primary` Core [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev13/) +:::{grid-item-card} {fas}`cube;pst-color-primary` Core [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/) :shadow: none :margin: 2 0 0 0 :columns: 12 12 12 12 @@ -159,7 +159,7 @@ Get Started ::: -:::{grid-item-card} {fas}`puzzle-piece;pst-color-primary` Extensions [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev13/) +:::{grid-item-card} {fas}`puzzle-piece;pst-color-primary` Extensions [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) :shadow: none :margin: 2 0 0 0 :columns: 12 12 12 12 diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md index b55fcccd54ca..e4e49591df95 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md @@ -61,7 +61,7 @@ Install the `autogen-agentchat` package using pip: ```bash -pip install "autogen-agentchat==0.4.0.dev13" +pip install -U "autogen-agentchat" ``` ```{note} @@ -74,7 +74,7 @@ To use the OpenAI and Azure OpenAI models, you need to install the following extensions: ```bash -pip install "autogen-ext[openai]==0.4.0.dev13" +pip install "autogen-ext[openai]" ``` If you are using Azure OpenAI with AAD authentication, you need to install the following: diff --git 
a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/magentic-one.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/magentic-one.md index 48c7afc0b646..556952605618 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/magentic-one.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/magentic-one.md @@ -41,7 +41,7 @@ Be aware that agents may occasionally attempt risky actions, such as recruiting Install the required packages: ```bash -pip install autogen-agentchat==0.4.0.dev13 autogen-ext[magentic-one,openai]==0.4.0.dev13 +pip install autogen-agentchat autogen-ext[magentic-one,openai] # If using the MultimodalWebSurfer, you also need to install playwright dependencies: playwright install --with-deps chromium diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb index 94dbb7528d64..06e265b1429d 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb @@ -29,7 +29,7 @@ }, "outputs": [], "source": [ - "pip install \"autogen-agentchat==0.4.0.dev13\" \"autogen-ext[openai,azure]==0.4.0.dev13\"" + "pip install -U \"autogen-agentchat\" \"autogen-ext[openai,azure]\"" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb index ce1c21077d43..d7aed4fc5cda 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb @@ -28,7 +28,7 @@ }, "outputs": [], "source": [ - "pip install \"autogen-ext[openai]==0.4.0.dev13\"" + "pip 
install \"autogen-ext[openai]\"" ] }, { @@ -108,7 +108,7 @@ }, "outputs": [], "source": [ - "pip install \"autogen-ext[openai,azure]==0.4.0.dev13\"" + "pip install \"autogen-ext[openai,azure]\"" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index 96a80a2f08cb..c67c998c0a65 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -24,7 +24,7 @@ "````{note}\n", "The distributed agent runtime requires extra dependencies, install them using:\n", "```bash\n", - "pip install \"autogen-ext[grpc]==0.4.0.dev13\"\n", + "pip install \"autogen-ext[grpc]\"\n", "```\n", "````\n", "\n", @@ -222,4 +222,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md index 8b7d0dc2dfeb..3fd181c7feeb 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/installation.md @@ -54,7 +54,7 @@ Install the `autogen-core` package using pip: ```bash -pip install "autogen-core==0.4.0.dev13" +pip install "autogen-core" ``` ```{note} diff --git a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/installation.md index bbec3120d6dd..7a59605b19ec 100644 --- a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/installation.md +++ 
b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/installation.md @@ -10,7 +10,7 @@ myst: First-part maintained extensions are available in the `autogen-ext` package. ```sh -pip install "autogen-ext==0.4.0.dev13" +pip install "autogen-ext" ``` Extras: diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index 7f06fc66d02a..4d6aa4ba6410 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-core" -version = "0.4.0.dev13" +version = "0.4.0" license = {file = "LICENSE-CODE"} description = "Foundational interfaces and agent runtime implementation for AutoGen" readme = "README.md" @@ -69,7 +69,7 @@ dev = [ "pygments", "sphinxext-rediraffe", - "autogen_ext==0.4.0.dev13", + "autogen_ext==0.4.0", # Documentation tooling "sphinx-autobuild", diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index 8b972e98b8b0..a2ace335f172 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ b/python/packages/autogen-ext/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-ext" -version = "0.4.0.dev13" +version = "0.4.0" license = {file = "LICENSE-CODE"} description = "AutoGen extensions library" readme = "README.md" @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-core==0.4.0.dev13", + "autogen-core==0.4.0", ] [project.optional-dependencies] @@ -24,23 +24,23 @@ azure = ["azure-core", "azure-identity"] docker = ["docker~=7.0"] openai = ["openai>=1.52.2", "tiktoken>=0.8.0", "aiofiles"] file-surfer = [ - "autogen-agentchat==0.4.0.dev13", + "autogen-agentchat==0.4.0", "markitdown>=0.0.1a2", ] web-surfer = [ - "autogen-agentchat==0.4.0.dev13", + "autogen-agentchat==0.4.0", "playwright>=1.48.0", "pillow>=11.0.0", "markitdown>=0.0.1a2", ] magentic-one = 
[ - "autogen-agentchat==0.4.0.dev13", + "autogen-agentchat==0.4.0", "markitdown>=0.0.1a2", "playwright>=1.48.0", "pillow>=11.0.0", ] video-surfer = [ - "autogen-agentchat==0.4.0.dev13", + "autogen-agentchat==0.4.0", "opencv-python>=4.5", "ffmpeg-python", "openai-whisper", diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py index 7297a88e6979..0f389313057a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py @@ -37,7 +37,7 @@ class FileSurfer(BaseChatAgent): .. code-block:: bash - pip install "autogen-ext[file-surfer]==0.4.0.dev13" + pip install "autogen-ext[file-surfer]" Args: name (str): The agent's name diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index 496b68fd3e0b..e4d359cf3eff 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -84,7 +84,7 @@ class OpenAIAssistantAgent(BaseChatAgent): .. code-block:: bash - pip install "autogen-ext[openai]==0.4.0.dev13" + pip install "autogen-ext[openai]" This agent leverages the OpenAI Assistant API to create AI assistants with capabilities like: diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py index 43be974b1ea4..34887779310c 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/video_surfer/_video_surfer.py @@ -22,7 +22,7 @@ class VideoSurfer(AssistantAgent): .. 
code-block:: bash - pip install "autogen-ext[video-surfer]==0.4.0.dev13" + pip install "autogen-ext[video-surfer]" This agent utilizes various tools to extract information from the video, such as its length, screenshots at specific timestamps, and audio transcriptions. It processes these elements to provide detailed answers to user queries. diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index c6bcf2c85d7b..d266a2086529 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -66,7 +66,7 @@ class MultimodalWebSurfer(BaseChatAgent): .. code-block:: bash - pip install "autogen-ext[web-surfer]==0.4.0.dev13" + pip install "autogen-ext[web-surfer]" It launches a chromium browser and allows the playwright to interact with the web browser and can perform a variety of actions. The browser is launched on the first call to the agent and is reused for subsequent calls. diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executors/azure/_azure_container_code_executor.py b/python/packages/autogen-ext/src/autogen_ext/code_executors/azure/_azure_container_code_executor.py index c877d91f69e9..3971d93665c1 100644 --- a/python/packages/autogen-ext/src/autogen_ext/code_executors/azure/_azure_container_code_executor.py +++ b/python/packages/autogen-ext/src/autogen_ext/code_executors/azure/_azure_container_code_executor.py @@ -51,7 +51,7 @@ class ACADynamicSessionsCodeExecutor(CodeExecutor): .. code-block:: bash - pip install "autogen-ext[azure]==0.4.0.dev13" + pip install "autogen-ext[azure]" .. 
caution:: diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py b/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py index d608d389d1d9..05924e186643 100644 --- a/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py +++ b/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py @@ -59,7 +59,7 @@ class DockerCommandLineCodeExecutor(CodeExecutor): .. code-block:: bash - pip install "autogen-ext[docker]==0.4.0.dev13" + pip install "autogen-ext[docker]" The executor first saves each code block in a file in the working diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py index dbe2eb65e045..366ad831175e 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/__init__.py @@ -1,9 +1,9 @@ -from ._openai_client import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient, BaseOpenAIChatCompletionClient +from ._openai_client import AzureOpenAIChatCompletionClient, BaseOpenAIChatCompletionClient, OpenAIChatCompletionClient from .config import ( AzureOpenAIClientConfigurationConfigModel, - OpenAIClientConfigurationConfigModel, BaseOpenAIClientConfigurationConfigModel, CreateArgumentsConfigModel, + OpenAIClientConfigurationConfigModel, ) __all__ = [ diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index 0a811dacce83..5b9f51129a88 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -931,7 +931,7 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient, Component[OpenA .. 
code-block:: bash - pip install "autogen-ext[openai]==0.4.0.dev13" + pip install "autogen-ext[openai]" The following code snippet shows how to use the client with an OpenAI model: @@ -1062,7 +1062,7 @@ class AzureOpenAIChatCompletionClient( .. code-block:: bash - pip install "autogen-ext[openai,azure]==0.4.0.dev13" + pip install "autogen-ext[openai,azure]" To use the client, you need to provide your deployment id, Azure Cognitive Services endpoint, api version, and model capabilities. diff --git a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py b/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py index fb59332be213..23aca97014c3 100644 --- a/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py +++ b/python/packages/autogen-ext/src/autogen_ext/teams/magentic_one.py @@ -23,7 +23,7 @@ class MagenticOne(MagenticOneGroupChat): .. code-block:: bash - pip install "autogen-ext[magentic-one]==0.4.0.dev13" + pip install "autogen-ext[magentic-one]" Args: diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py b/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py index a0669e5c71fe..3b72940f2445 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/code_execution/_code_execution.py @@ -30,7 +30,7 @@ class PythonCodeExecutionTool(BaseTool[CodeExecutionInput, CodeExecutionResult]) .. code-block:: bash - pip install "autogen-agentchat==0.4.0.dev13" "autogen-ext[openai]==0.4.0.dev13" "yfinance" "matplotlib" + pip install -U "autogen-agentchat" "autogen-ext[openai]" "yfinance" "matplotlib" .. 
code-block:: python diff --git a/python/packages/autogen-studio/autogenstudio/version.py b/python/packages/autogen-studio/autogenstudio/version.py index 525ab752dcd4..171811d227da 100644 --- a/python/packages/autogen-studio/autogenstudio/version.py +++ b/python/packages/autogen-studio/autogenstudio/version.py @@ -1,3 +1,3 @@ -VERSION = "0.4.0.dev41" +VERSION = "0.4.0" __version__ = VERSION APP_NAME = "autogenstudio" diff --git a/python/packages/autogen-studio/pyproject.toml b/python/packages/autogen-studio/pyproject.toml index 5ebbc086b7b0..5fa6676198a6 100644 --- a/python/packages/autogen-studio/pyproject.toml +++ b/python/packages/autogen-studio/pyproject.toml @@ -33,9 +33,9 @@ dependencies = [ "alembic", "loguru", "pyyaml", - "autogen-core==0.4.0.dev13", - "autogen-agentchat==0.4.0.dev13", - "autogen-ext[magentic-one]==0.4.0.dev13" + "autogen-core==0.4.0", + "autogen-agentchat==0.4.0", + "autogen-ext[magentic-one]==0.4.0" ] optional-dependencies = {web = ["fastapi", "uvicorn"], database = ["psycopg"]} diff --git a/python/samples/agentchat_chainlit/requirements.txt b/python/samples/agentchat_chainlit/requirements.txt index c7dd4ca40348..db122ba31ce4 100644 --- a/python/samples/agentchat_chainlit/requirements.txt +++ b/python/samples/agentchat_chainlit/requirements.txt @@ -1,2 +1,2 @@ chainlit -autogen-agentchat==0.4.0.dev13 +autogen-agentchat==0.4.0 diff --git a/python/samples/core_async_human_in_the_loop/README.md b/python/samples/core_async_human_in_the_loop/README.md index af8d9e4a1d79..7ed54cc92416 100644 --- a/python/samples/core_async_human_in_the_loop/README.md +++ b/python/samples/core_async_human_in_the_loop/README.md @@ -9,7 +9,7 @@ An example showing human-in-the-loop which waits for human input before making t First, you need a shell with AutoGen core and required dependencies installed. 
```bash -pip install "autogen-core==0.4.0.dev13" "autogen-ext[openai,azure]==0.4.0.dev13" +pip install "autogen-core" "autogen-ext[openai,azure]" ``` ### Model Configuration diff --git a/python/samples/core_chess_game/README.md b/python/samples/core_chess_game/README.md index b0f5a9194a2d..a27c5cb99025 100644 --- a/python/samples/core_chess_game/README.md +++ b/python/samples/core_chess_game/README.md @@ -9,7 +9,7 @@ An example with two chess player agents that executes its own tools to demonstra First, you need a shell with AutoGen core and required dependencies installed. ```bash -pip install "autogen-core==0.4.0.dev13" "autogen-ext[openai,azure]==0.4.0.dev13" "chess" +pip install "autogen-core" "autogen-ext[openai,azure]" "chess" ``` ### Model Configuration diff --git a/python/uv.lock b/python/uv.lock index 330f570ef432..435d9d210ebe 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -347,7 +347,7 @@ wheels = [ [[package]] name = "autogen-agentchat" -version = "0.4.0.dev13" +version = "0.4.0" source = { editable = "packages/autogen-agentchat" } dependencies = [ { name = "aioconsole" }, @@ -362,7 +362,7 @@ requires-dist = [ [[package]] name = "autogen-core" -version = "0.4.0.dev13" +version = "0.4.0" source = { editable = "packages/autogen-core" } dependencies = [ { name = "jsonref" }, @@ -477,7 +477,7 @@ dev = [ [[package]] name = "autogen-ext" -version = "0.4.0.dev13" +version = "0.4.0" source = { editable = "packages/autogen-ext" } dependencies = [ { name = "autogen-core" }, @@ -652,7 +652,7 @@ requires-dist = [ [[package]] name = "autogenstudio" -version = "0.4.0.dev41" +version = "0.4.0" source = { editable = "packages/autogen-studio" } dependencies = [ { name = "aiofiles" }, From 388a402243964de8eddd48a82c406cd9f19b6e25 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:31:46 -0500 Subject: [PATCH 47/61] Update magentic-one-cli dep bounds (#4971) --- python/packages/magentic-one-cli/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 
2 deletions(-) diff --git a/python/packages/magentic-one-cli/pyproject.toml b/python/packages/magentic-one-cli/pyproject.toml index b5db07ed79e5..036cb33b787d 100644 --- a/python/packages/magentic-one-cli/pyproject.toml +++ b/python/packages/magentic-one-cli/pyproject.toml @@ -15,8 +15,8 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-agentchat", #>=0.4.0<0.5 - "autogen-ext[openai,magentic-one]", #>=0.4.0<0.5 + "autogen-agentchat>=0.4.0<0.5", + "autogen-ext[openai,magentic-one]>=0.4.0<0.5", ] [project.scripts] From 78ac9f8507d9f7c8e3d87b4db67b57e116ade1b7 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:40:19 -0500 Subject: [PATCH 48/61] Fix magentic-one-cli version bound (#4972) --- python/packages/magentic-one-cli/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/magentic-one-cli/pyproject.toml b/python/packages/magentic-one-cli/pyproject.toml index 036cb33b787d..cbbc48867e8c 100644 --- a/python/packages/magentic-one-cli/pyproject.toml +++ b/python/packages/magentic-one-cli/pyproject.toml @@ -15,8 +15,8 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-agentchat>=0.4.0<0.5", - "autogen-ext[openai,magentic-one]>=0.4.0<0.5", + "autogen-agentchat>=0.4.0,<0.5", + "autogen-ext[openai,magentic-one]>=0.4.0,<0.5", ] [project.scripts] From 4dab09cabe78c5360cc31832055d72f78a67877b Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:54:36 -0500 Subject: [PATCH 49/61] Update magentic-one-cli version to 0.2.0 (#4973) * Update magentic-one-cli version to 0.2.0 * lock --- python/packages/magentic-one-cli/pyproject.toml | 2 +- python/uv.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/magentic-one-cli/pyproject.toml b/python/packages/magentic-one-cli/pyproject.toml index cbbc48867e8c..5b14ed6f73d7 100644 --- a/python/packages/magentic-one-cli/pyproject.toml +++ 
b/python/packages/magentic-one-cli/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "magentic-one-cli" -version = "0.1.0" +version = "0.2.0" license = {file = "LICENSE-CODE"} description = "Magentic-One is a generalist multi-agent system, built on `AutoGen-AgentChat`, for solving complex web and file-based tasks. This package installs the `m1` command-line utility to quickly get started with Magentic-One." readme = "README.md" diff --git a/python/uv.lock b/python/uv.lock index 435d9d210ebe..219b5176d2d1 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -2468,7 +2468,7 @@ wheels = [ [[package]] name = "magentic-one-cli" -version = "0.1.0" +version = "0.2.0" source = { editable = "packages/magentic-one-cli" } dependencies = [ { name = "autogen-agentchat" }, From 623e0185c7f33eb43e073d33bb74c21e59522dad Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 15:58:45 -0500 Subject: [PATCH 50/61] Update switcher versions and make 0.4.0 stable (#4940) * Update switcher versions and make 0.4.0 stable * update versions * update switcher --- .github/workflows/docs.yml | 3 ++- docs/switcher.json | 16 +++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0afb1745ba10..40b66a54471c 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -33,7 +33,7 @@ jobs: [ # For main use the workflow target { ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13" }, - { ref: "${{github.ref}}", dest-dir: stable, uv-version: "0.5.13" }, + { ref: "v0.4.0", dest-dir: stable, uv-version: "0.5.13" }, { ref: "v0.4.0.dev0", dest-dir: "0.4.0.dev0", uv-version: "0.5.11" }, { ref: "v0.4.0.dev1", dest-dir: "0.4.0.dev1", uv-version: "0.5.11" }, { ref: "v0.4.0.dev2", dest-dir: "0.4.0.dev2", uv-version: "0.5.11" }, @@ -48,6 +48,7 @@ jobs: { ref: "v0.4.0.dev11", dest-dir: "0.4.0.dev11", uv-version: "0.5.11" }, { ref: "v0.4.0.dev12", dest-dir: 
"0.4.0.dev12", uv-version: "0.5.13" }, { ref: "v0.4.0.dev13", dest-dir: "0.4.0.dev13", uv-version: "0.5.13" }, + { ref: "v0.4.0", dest-dir: "0.4.0", uv-version: "0.5.13" }, ] steps: - name: Checkout diff --git a/docs/switcher.json b/docs/switcher.json index 2e6bcd63f8b9..f90f52ef0847 100644 --- a/docs/switcher.json +++ b/docs/switcher.json @@ -1,13 +1,20 @@ [ { - "name": "0.2 (stable)", - "version": "0.2-stable", - "url": "/autogen/0.2/" + "name": "0.4.0 (stable)", + "version": "stable", + "url": "/autogen/stable/", + "preferred": true }, { + "name": "dev (main)", "version": "dev", "url": "/autogen/dev/" }, + { + "name": "0.2", + "version": "0.2", + "url": "/autogen/0.2/" + }, { "name": "0.4.0.dev0", "version": "0.4.0.dev0", @@ -76,7 +83,6 @@ { "name": "0.4.0.dev13", "version": "0.4.0.dev13", - "url": "/autogen/0.4.0.dev13/", - "preferred": true + "url": "/autogen/0.4.0.dev13/" } ] From 90112e1162e13dadfd6c0a8375cdb9d705d1c984 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 16:09:14 -0500 Subject: [PATCH 51/61] Update version of preferred docs URL --- docs/switcher.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/switcher.json b/docs/switcher.json index f90f52ef0847..3d94443e6a4e 100644 --- a/docs/switcher.json +++ b/docs/switcher.json @@ -1,7 +1,7 @@ [ { "name": "0.4.0 (stable)", - "version": "stable", + "version": "0.4.0", "url": "/autogen/stable/", "preferred": true }, From 6abc70044ad5ba782099f1bb0912078714b45128 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 17:06:35 -0500 Subject: [PATCH 52/61] Fix version switcher rendering (#4974) * Fix docs switcher rendering * update tag * use post1 --- .github/workflows/docs.yml | 35 ++++++++++--------- docs/switcher.json | 2 +- python/packages/autogen-core/docs/src/conf.py | 6 +++- 3 files changed, 24 insertions(+), 19 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 40b66a54471c..74595b02e8ee 100644 --- 
a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -32,23 +32,23 @@ jobs: version: [ # For main use the workflow target - { ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13" }, - { ref: "v0.4.0", dest-dir: stable, uv-version: "0.5.13" }, - { ref: "v0.4.0.dev0", dest-dir: "0.4.0.dev0", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev1", dest-dir: "0.4.0.dev1", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev2", dest-dir: "0.4.0.dev2", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev3", dest-dir: "0.4.0.dev3", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev4", dest-dir: "0.4.0.dev4", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev5", dest-dir: "0.4.0.dev5", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev6", dest-dir: "0.4.0.dev6", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev7", dest-dir: "0.4.0.dev7", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev8", dest-dir: "0.4.0.dev8", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev9", dest-dir: "0.4.0.dev9", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev10", dest-dir: "0.4.0.dev10", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev11", dest-dir: "0.4.0.dev11", uv-version: "0.5.11" }, - { ref: "v0.4.0.dev12", dest-dir: "0.4.0.dev12", uv-version: "0.5.13" }, - { ref: "v0.4.0.dev13", dest-dir: "0.4.0.dev13", uv-version: "0.5.13" }, - { ref: "v0.4.0", dest-dir: "0.4.0", uv-version: "0.5.13" }, + { ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13", sphinx-release-override: "dev" }, + { ref: "v0.4.0.post1", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" }, + { ref: "v0.4.0.dev0", dest-dir: "0.4.0.dev0", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev1", dest-dir: "0.4.0.dev1", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev2", dest-dir: "0.4.0.dev2", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev3", dest-dir: "0.4.0.dev3", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev4", dest-dir: "0.4.0.dev4", 
uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev5", dest-dir: "0.4.0.dev5", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev6", dest-dir: "0.4.0.dev6", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev7", dest-dir: "0.4.0.dev7", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev8", dest-dir: "0.4.0.dev8", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev9", dest-dir: "0.4.0.dev9", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev10", dest-dir: "0.4.0.dev10", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev11", dest-dir: "0.4.0.dev11", uv-version: "0.5.11", sphinx-release-override: "" }, + { ref: "v0.4.0.dev12", dest-dir: "0.4.0.dev12", uv-version: "0.5.13", sphinx-release-override: "" }, + { ref: "v0.4.0.dev13", dest-dir: "0.4.0.dev13", uv-version: "0.5.13", sphinx-release-override: "" }, + { ref: "v0.4.0.post1", dest-dir: "0.4.0", uv-version: "0.5.13", sphinx-release-override: "" }, ] steps: - name: Checkout @@ -73,6 +73,7 @@ jobs: env: PY_DOCS_DIR: ${{ matrix.version.dest-dir }}/ PY_SWITCHER_VERSION: ${{ matrix.version.dest-dir }} + SPHINX_RELEASE_OVERRIDE: ${{ matrix.version.sphinx-release-override }} - uses: actions/upload-artifact@v4 with: path: "./python/docs-staging" diff --git a/docs/switcher.json b/docs/switcher.json index 3d94443e6a4e..f90f52ef0847 100644 --- a/docs/switcher.json +++ b/docs/switcher.json @@ -1,7 +1,7 @@ [ { "name": "0.4.0 (stable)", - "version": "0.4.0", + "version": "stable", "url": "/autogen/stable/", "preferred": true }, diff --git a/python/packages/autogen-core/docs/src/conf.py b/python/packages/autogen-core/docs/src/conf.py index 7aa75ebd1acc..c1b362480eb3 100644 --- a/python/packages/autogen-core/docs/src/conf.py +++ b/python/packages/autogen-core/docs/src/conf.py @@ -17,8 +17,12 @@ copyright = "2024, Microsoft" author = "Microsoft" version = "0.4" -release = 
autogen_core.__version__ +release_override = os.getenv("SPHINX_RELEASE_OVERRIDE") +if release_override is None or release_override == "": + release = autogen_core.__version__ +else: + release = release_override sys.path.append(str(Path(".").resolve())) From 6bc285ce395ed6642c1ed945b9a1eb07a213187c Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 18:03:56 -0500 Subject: [PATCH 53/61] Don't show banner on stable (#4976) --- .../autogen-core/docs/src/_static/banner-override.js | 11 +++++++++++ .../docs/src/_templates/version-banner-override.html | 1 + python/packages/autogen-core/docs/src/conf.py | 4 ++-- 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 python/packages/autogen-core/docs/src/_static/banner-override.js create mode 100644 python/packages/autogen-core/docs/src/_templates/version-banner-override.html diff --git a/python/packages/autogen-core/docs/src/_static/banner-override.js b/python/packages/autogen-core/docs/src/_static/banner-override.js new file mode 100644 index 000000000000..48c9358d898d --- /dev/null +++ b/python/packages/autogen-core/docs/src/_static/banner-override.js @@ -0,0 +1,11 @@ +var version = DOCUMENTATION_OPTIONS.VERSION; +if (version === "stable") { + var styles = ` +s#bd-header-version-warning { + display: none; +} + ` + var styleSheet = document.createElement("style") + styleSheet.textContent = styles + document.head.appendChild(styleSheet) +} \ No newline at end of file diff --git a/python/packages/autogen-core/docs/src/_templates/version-banner-override.html b/python/packages/autogen-core/docs/src/_templates/version-banner-override.html new file mode 100644 index 000000000000..1fa4844f966c --- /dev/null +++ b/python/packages/autogen-core/docs/src/_templates/version-banner-override.html @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/python/packages/autogen-core/docs/src/conf.py b/python/packages/autogen-core/docs/src/conf.py index c1b362480eb3..fc46900dc004 100644 --- 
a/python/packages/autogen-core/docs/src/conf.py +++ b/python/packages/autogen-core/docs/src/conf.py @@ -121,7 +121,7 @@ "footer_start": ["copyright"], "footer_center": ["footer-middle-links"], - "footer_end": ["theme-version"], + "footer_end": ["theme-version", "version-banner-override"], "pygments_light_style": "xcode", "pygments_dark_style": "monokai", "navbar_start": ["navbar-logo", "version-switcher"], @@ -135,7 +135,7 @@ ] } -html_js_files = ["custom-icon.js"] +html_js_files = ["custom-icon.js", "banner-override.js"] html_sidebars = { "packages/index": [], "user-guide/core-user-guide/**": ["sidebar-nav-bs-core"], From d2c1bb8ad90590d6263d5f754489541f9f8a5c8a Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Thu, 9 Jan 2025 19:00:19 -0500 Subject: [PATCH 54/61] Remove accidentally added character (#4980) --- .../packages/autogen-core/docs/src/_static/banner-override.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/_static/banner-override.js b/python/packages/autogen-core/docs/src/_static/banner-override.js index 48c9358d898d..e67243f03901 100644 --- a/python/packages/autogen-core/docs/src/_static/banner-override.js +++ b/python/packages/autogen-core/docs/src/_static/banner-override.js @@ -1,7 +1,7 @@ var version = DOCUMENTATION_OPTIONS.VERSION; if (version === "stable") { var styles = ` -s#bd-header-version-warning { +#bd-header-version-warning { display: none; } ` From b6c3df29201133ea2f422f7f9fa31f3775179ec6 Mon Sep 17 00:00:00 2001 From: Matthew Wyman Date: Thu, 9 Jan 2025 16:29:28 -0800 Subject: [PATCH 55/61] Update README.md to fix spelling error (#4982) --- python/samples/agentchat_chainlit/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/samples/agentchat_chainlit/README.md b/python/samples/agentchat_chainlit/README.md index 3f297158598c..d03ca98e0283 100644 --- a/python/samples/agentchat_chainlit/README.md +++ b/python/samples/agentchat_chainlit/README.md @@ 
-104,5 +104,5 @@ team = RoundRobinGroupChat( In this example, we created a basic AutoGen team with a single agent in a RoundRobinGroupChat team. There are a few ways you can extend this example: - Add more [agents](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/tutorial/agents.html) to the team. -- Explor custom agents that sent multimodal messages +- Explore custom agents that send multimodal messages - Explore more [team](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/tutorial/teams.html) types beyond the `RoundRobinGroupChat`. From 001f0262babf5baa71148467a2d705e80280653d Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Fri, 10 Jan 2025 00:08:30 -0800 Subject: [PATCH 56/61] Minor API doc update for openai assistant agent (#4986) --- .../src/autogen_ext/agents/openai/_openai_assistant_agent.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index e4d359cf3eff..d738a5fae894 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -109,6 +109,8 @@ class OpenAIAssistantAgent(BaseChatAgent): * Vector store integration for efficient file search * Automatic file parsing and embedding + You can use an existing thread or assistant by providing the `thread_id` or `assistant_id` parameters. + Example: ..
code-block:: python @@ -160,6 +162,7 @@ async def example(): instructions (str): System instructions for the assistant tools (Optional[Iterable[Union[Literal["code_interpreter", "file_search"], Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]]]]): Tools the assistant can use assistant_id (Optional[str]): ID of existing assistant to use + thread_id (Optional[str]): ID of existing thread to use metadata (Optional[object]): Additional metadata for the assistant response_format (Optional[AssistantResponseFormatOptionParam]): Response format settings temperature (Optional[float]): Temperature for response generation From 6044924a4fbf96912d0307fce6a5466a16013987 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Fri, 10 Jan 2025 08:47:10 -0500 Subject: [PATCH 57/61] Add guidance for docstrings when adding an API (#4981) --- CONTRIBUTING.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e9705198c74d..c2e781239c48 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -118,3 +118,19 @@ Args: agent_type (str): Agent type to handle this subscription """ ``` + +## Docs when adding a new API + +Now that 0.4.0 is out, we should ensure the docs between versions are easy to navigate. To this end, added or changed APIs should have the following added to their docstrings respectively: + +```rst +.. versionadded:: v0.4.1 + + Here's a version added message. + +.. versionchanged:: v0.4.1 + + Here's a version changed message. +``` + +See [here](https://pydata-sphinx-theme.readthedocs.io/en/stable/examples/kitchen-sink/admonitions.html#versionadded) for how they are rendered. 
From c59cfdd787e657fed62345824364883e0b7d88ca Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Fri, 10 Jan 2025 14:01:00 +0000 Subject: [PATCH 58/61] Fix typo in `Multi-Agent Design Patterns -> Intro` docs (#4991) --- .../src/user-guide/core-user-guide/design-patterns/intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/intro.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/intro.md index 5fad8db2506c..b8f50d799a98 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/intro.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/intro.md @@ -9,7 +9,7 @@ like software development. A multi-agent design pattern is a structure that emerges from message protocols: it describes how agents interact with each other to solve problems. -For example, the [tool-equiped agent](../framework/tools.ipynb#tool-equipped-agent) in +For example, the [tool-equipped agent](../framework/tools.ipynb#tool-equipped-agent) in the previous section employs a design pattern called ReAct, which involves an agent interacting with tools. 
From 115fefa132116f62c428e4dfe43d8f23f71fe306 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Fri, 10 Jan 2025 13:00:17 -0500 Subject: [PATCH 59/61] Add missing py.typed in autogen_ext, fix type issue in core (#4993) --- .../autogen-core/src/autogen_core/_component_config.py | 6 +++--- python/packages/autogen-ext/src/autogen_ext/py.typed | 0 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 python/packages/autogen-ext/src/autogen_ext/py.typed diff --git a/python/packages/autogen-core/src/autogen_core/_component_config.py b/python/packages/autogen-core/src/autogen_core/_component_config.py index f5426428c90c..1045282921f2 100644 --- a/python/packages/autogen-core/src/autogen_core/_component_config.py +++ b/python/packages/autogen-core/src/autogen_core/_component_config.py @@ -2,7 +2,7 @@ import importlib import warnings -from typing import Any, ClassVar, Dict, Generic, Literal, Protocol, Type, cast, overload, runtime_checkable +from typing import Any, ClassVar, Dict, Generic, List, Literal, Protocol, Type, cast, overload, runtime_checkable from pydantic import BaseModel from typing_extensions import Self, TypeVar @@ -243,9 +243,9 @@ def _from_config(cls, config: Config) -> MyComponent: return cls(value=config.value) """ - required_class_vars = ["component_config_schema", "component_type"] + required_class_vars: ClassVar[List[str]] = ["component_config_schema", "component_type"] - def __init_subclass__(cls, **kwargs: Any): + def __init_subclass__(cls, **kwargs: Any) -> None: super().__init_subclass__(**kwargs) # TODO: validate provider is loadable diff --git a/python/packages/autogen-ext/src/autogen_ext/py.typed b/python/packages/autogen-ext/src/autogen_ext/py.typed new file mode 100644 index 000000000000..e69de29bb2d1 From 9f351c50fff0043737990304a72a2b85fd9a1f2b Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Sat, 11 Jan 2025 18:25:18 -0800 Subject: [PATCH 60/61] Minor Updates to AGS Docs (#5010) * update docs * update docs * update ags 
documentation * update uv lock * update usage --- README.md | 21 ++-- .../autogenstudio-user-guide/index.md | 9 -- .../autogenstudio-user-guide/installation.md | 94 ++++++++++++---- .../autogenstudio-user-guide/usage.md | 100 +++++++++++++----- python/packages/autogen-studio/README.md | 4 +- .../autogen-studio/docs/ags_screen.png | 4 +- python/packages/autogen-studio/pyproject.toml | 3 +- python/uv.lock | 2 + 8 files changed, 166 insertions(+), 71 deletions(-) diff --git a/README.md b/README.md index 807f13077b29..8760e8caf96e 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Company?style=flat&logo=linkedin&logoColor=white)](https://www.linkedin.com/company/105812540) [![Discord](https://img.shields.io/badge/discord-chat-green?logo=discord)](https://aka.ms/autogen-discord) [![Documentation](https://img.shields.io/badge/Documentation-AutoGen-blue?logo=read-the-docs)](https://microsoft.github.io/autogen/) + # AutoGen @@ -74,13 +75,13 @@ asyncio.run(main()) The AutoGen ecosystem provides everything you need to create AI agents, especially multi-agent workflows -- framework, developer tools, and applications. -The *framework* uses a layered and extensible design. Layers have clearly divided responsibilities and build on top of layers below. This design enables you to use the framework at different levels of abstraction, from high-level APIs to low-level components. +The _framework_ uses a layered and extensible design. Layers have clearly divided responsibilities and build on top of layers below. This design enables you to use the framework at different levels of abstraction, from high-level APIs to low-level components. - [Core API](./python/packages/autogen-core/) implements message passing, event-driven agents, and local and distributed runtime for flexibility and power. It also support cross-language support for .NET and Python. 
- [AgentChat API](./python/packages/autogen-agentchat/) implements a simpler but opinionatedĀ API rapid for prototyping. This API is built on top of the Core API and is closest to what users of v0.2 are familiar with and supports familiar multi-agent patterns such as two-agent chat or group chats. - [Extensions API](./python/packages/autogen-ext/) enables first- and third-party extensions continuously expanding framework capabilities. It support specific implementation of LLM clients (e.g., OpenAI, AzureOpenAI), and capabilities such as code execution. -The ecosystem also supports two essential *developer tools*: +The ecosystem also supports two essential _developer tools_:
    AutoGen Studio Screenshot @@ -97,17 +98,17 @@ With AutoGen you get to join and contribute to a thriving ecosystem. We host wee
    -| | [![Python](https://img.shields.io/badge/AutoGen-Python-blue?logo=python&logoColor=white)](./python) | [![.NET](https://img.shields.io/badge/AutoGen-.NET-green?logo=.net&logoColor=white)](./dotnet) | [![Studio](https://img.shields.io/badge/AutoGen-Studio-purple?logo=visual-studio&logoColor=white)](./python/packages/autogen-studio) | -|----------------------|--------------------------------------------------------------------------------------------|-------------------|-------------------| -| Installation | [![Installation](https://img.shields.io/badge/Install-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/installation.html) | * | [![Install](https://img.shields.io/badge/Install-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/installation.html) | -| Quickstart | [![Quickstart](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/quickstart.html#) | * | * | -| Tutorial | [![Tutorial](https://img.shields.io/badge/Tutorial-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/tutorial/models.html) | *| * | -| API Reference | [![API](https://img.shields.io/badge/Docs-blue)](https://microsoft.github.io/autogen/dev/reference/index.html#) | * | [![API](https://img.shields.io/badge/Docs-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html) | -| Packages | [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/)
    [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/)
    [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) | * | [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-purple?logo=pypi)](https://pypi.org/project/autogenstudio/) | +| | [![Python](https://img.shields.io/badge/AutoGen-Python-blue?logo=python&logoColor=white)](./python) | [![.NET](https://img.shields.io/badge/AutoGen-.NET-green?logo=.net&logoColor=white)](./dotnet) | [![Studio](https://img.shields.io/badge/AutoGen-Studio-purple?logo=visual-studio&logoColor=white)](./python/packages/autogen-studio) | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Installation | [![Installation](https://img.shields.io/badge/Install-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/installation.html) | \* | [![Install](https://img.shields.io/badge/Install-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/installation.html) | +| Quickstart | [![Quickstart](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/quickstart.html#) | \* | [![Usage](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html#) | +| Tutorial | 
[![Tutorial](https://img.shields.io/badge/Tutorial-blue)](https://microsoft.github.io/autogen/dev/user-guide/agentchat-user-guide/tutorial/models.html) | \* | [![Usage](https://img.shields.io/badge/Quickstart-blue)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html#) | +| API Reference | [![API](https://img.shields.io/badge/Docs-blue)](https://microsoft.github.io/autogen/dev/reference/index.html#) | \* | [![API](https://img.shields.io/badge/Docs-purple)](https://microsoft.github.io/autogen/dev/user-guide/autogenstudio-user-guide/usage.html) | +| Packages | [![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/)
    [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/)
    [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/) | \* | [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-purple?logo=pypi)](https://pypi.org/project/autogenstudio/) |
    -**Releasing soon* +\*_Releasing soon_ Interested in contributing? See [CONTRIBUTING.md](./CONTRIBUTING.md) for guidelines on how to get started. We welcome contributions of all kinds, including bug fixes, new features, and documentation improvements. Join our community and help us make AutoGen better! diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md index 4582657bc24e..09de3f9ac14f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md +++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/index.md @@ -49,15 +49,6 @@ AutoGen Studio offers four main interfaces to help you build and manage multi-ag - Setup and test endpoints based on a team configuration - Run teams in a docker container -This revision improves clarity by: - -- Organizing capabilities into clearly numbered sections -- Using more precise language -- Breaking down complex features into digestible points -- Maintaining consistent formatting and structure -- Eliminating awkward phrasing and grammatical issues -- Adding context about how each interface serves users - ### Roadmap Review project roadmap and issues [here](https://github.com/microsoft/autogen/issues/4006) . diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/installation.md index 2ebc167213d2..2ca91af58251 100644 --- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/installation.md @@ -9,35 +9,83 @@ myst: There are two ways to install AutoGen Studio - from PyPi or from source. We **recommend installing from PyPi** unless you plan to modify the source code. -1. 
**Install from PyPi** +## Create a Virtual Environment (Recommended) - We recommend using a virtual environment (e.g., conda) to avoid conflicts with existing Python packages. With Python 3.10 or newer active in your virtual environment, use pip to install AutoGen Studio: +We recommend using a virtual environment as this will ensure that the dependencies for AutoGen Studio are isolated from the rest of your system. - ```bash - pip install -U autogenstudio - ``` +``````{tab-set} -2. **Install from Source** +`````{tab-item} venv - > Note: This approach requires some familiarity with building interfaces in React. +Create and activate: - If you prefer to install from source, ensure you have Python 3.10+ and Node.js (version above 14.15.0) installed. Here's how you get started: +```bash +python3 -m venv .venv +source .venv/bin/activate +``` + +To deactivate later, run: + +```bash +deactivate +``` + +````` + +`````{tab-item} conda + +[Install Conda](https://docs.conda.io/projects/conda/en/stable/user-guide/install/index.html) if you have not already. + + +Create and activate: + +```bash +conda create -n autogen python=3.10 +conda activate autogen +``` + +To deactivate later, run: + +```bash +conda deactivate +``` + + +````` + + + +`````` + +## Install Using pip (Recommended) + +You can install AutoGen Studio using pip, the Python package manager. + +```bash +pip install -U autogenstudio +``` + +### Install from Source\*\* + +> Note: This approach requires some familiarity with building interfaces in React. + +If you prefer to install from source, ensure you have Python 3.10+ and Node.js (version above 14.15.0) installed. Here's how you get started: - - Clone the AutoGen Studio repository and install its Python dependencies: +- Clone the AutoGen Studio repository and install its Python dependencies: - ```bash - pip install -e . - ``` + ```bash + pip install -e . 
+ ``` - - Navigate to the `samples/apps/autogen-studio/frontend` directory, install dependencies, and build the UI: - ```bash - npm install -g gatsby-cli - npm install --global yarn - cd frontend - yarn install - yarn build - ``` +- Navigate to the `samples/apps/autogen-studio/frontend` directory, install dependencies, and build the UI: + ```bash + npm install -g gatsby-cli + npm install --global yarn + cd frontend + yarn install + yarn build + ``` For Windows users, to build the frontend, you may need alternative commands to build the frontend. @@ -47,7 +95,7 @@ For Windows users, to build the frontend, you may need alternative commands to b ``` -### Running the Application +## Running the Application Once installed, run the web UI by entering the following in a terminal: @@ -62,8 +110,8 @@ AutoGen Studio also takes several parameters to customize the application: - `--host ` argument to specify the host address. By default, it is set to `localhost`. - `--appdir ` argument to specify the directory where the app files (e.g., database and generated user files) are stored. By default, it is set to a `.autogenstudio` directory in the user's home directory. - `--port ` argument to specify the port number. By default, it is set to `8080`. -- `--upgrade-database` argument to upgrade the database schema (assuming there are changes in the version you are installing). By default, it is set to `False`. +- `--upgrade-database` argument to force-upgrade its internal database schema (assuming there are changes in the version you are installing). By default, it is set to `False`. - `--reload` argument to enable auto-reloading of the server when changes are made to the code. By default, it is set to `False`. -- `--database-uri` argument to specify the database URI. Example values include `sqlite:///database.sqlite` for SQLite and `postgresql+psycopg://user:password@localhost/dbname` for PostgreSQL.
If this is not specified, the database URI defaults to a `database.sqlite` file in the `--appdir` directory. +- `--database-uri` argument to specify the database URI. Example values include `sqlite:///database.sqlite` for SQLite and `postgresql+psycopg://user:password@localhost/dbname` for PostgreSQL. If this is not specified, the database URI defaults to an `autogen.db` file in the `--appdir` directory. Now that you have AutoGen Studio installed and running, you are ready to explore its capabilities, including defining and modifying agent workflows, interacting with agents and sessions, and expanding agent skills. diff --git a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md index 12a409e157df..fa88712c1971 100644 --- a/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md +++ b/python/packages/autogen-core/docs/src/user-guide/autogenstudio-user-guide/usage.md @@ -7,51 +7,103 @@ myst: # Usage -The expected usage behavior is that developers use the provided Team Builder interface to to define teams - create agents, attach tools and models to agents, and define termination conditions. Once the team is defined, users can run the team in the Playground to interact with the team to accomplish tasks. +AutoGen Studio provides a Team Builder interface where developers can define multiple components and behaviors. Users can create teams, add agents to teams, attach tools and models to agents, and define team termination conditions. +After defining a team, users can test it in the Playground view to accomplish various tasks through direct interaction. ![AutoGen Studio](https://media.githubusercontent.com/media/microsoft/autogen/refs/heads/main/python/packages/autogen-studio/docs/ags_screen.png) +## Declarative Specification of Components + +AutoGen Studio uses a declarative specification system to build its GUI components.
At runtime, the AGS API loads these specifications into AutoGen AgentChat objects to address tasks. + +Here's an example of a declarative team specification: + +```json +{ + "version": "1.0.0", + "component_type": "team", + "name": "sample_team", + "participants": [ + { + "component_type": "agent", + "name": "assistant_agent", + "agent_type": "AssistantAgent", + "system_message": "You are a helpful assistant. Solve tasks carefully. When done respond with TERMINATE", + "model_client": { + "component_type": "model", + "model": "gpt-4o-2024-08-06", + "model_type": "OpenAIChatCompletionClient" + }, + "tools": [] + } + ], + "team_type": "RoundRobinGroupChat", + "termination_condition": { + "component_type": "termination", + "termination_type": "MaxMessageTermination", + "max_messages": 3 + } +} +``` + +This example shows a team with a single agent, using the `RoundRobinGroupChat` type and a `MaxMessageTermination` condition limited to 3 messages. + +```{note} +Work is currently in progress to make the entire AgentChat API declarative. This will allow all agentchat components to be `dumped` into the same declarative specification format used by AGS. +``` + ## Building an Agent Team -AutoGen Studio is tied very closely with all of the component abstractions provided by AutoGen AgentChat. This includes - {py:class}`~autogen_agentchat.teams`, {py:class}`~autogen_agentchat.agents`, {py:class}`~autogen_core.models`, {py:class}`~autogen_core.tools`, termination {py:class}`~autogen_agentchat.conditions`. +
    -Users can define these components in the Team Builder interface either via a declarative specification or by dragging and dropping components from a component library. +
    -## Interactively Running Teams +AutoGen Studio integrates closely with all component abstractions provided by AutoGen AgentChat, including {py:class}`~autogen_agentchat.teams`, {py:class}`~autogen_agentchat.agents`, {py:class}`~autogen_core.models`, {py:class}`~autogen_core.tools`, and termination {py:class}`~autogen_agentchat.conditions`. -AutoGen Studio Playground allows users to interactively test teams on tasks and review resulting artifacts (such as images, code, and text). +The Team Builder interface allows users to define components through either declarative specification or drag-and-drop functionality: -Users can also review the ā€œinner monologueā€ of team as they address tasks, and view profiling information such as costs associated with the run (such as number of turns, number of tokens etc.), and agent actions (such as whether tools were called and the outcomes of code execution). +Team Builder Operations: -## Importing and Reusing Team Configurations +- Create a new team + - Edit Team JSON directly (toggle visual builder mode off) or + - Use the visual builder, drag-and-drop components from the library: + - Teams: Add agents and termination conditions + - Agents: Add models and tools +- Save team configurations -AutoGen Studio provides a Gallery view which provides a built-in default gallery. A Gallery is simply is a collection of components - teams, agents, models tools etc. Furthermore, users can import components from 3rd party community sources either by providing a URL to a JSON Gallery spec or pasting in the gallery JSON. This allows users to reuse and share team configurations with others. +Component Library Management: -- Gallery -> New Gallery -> Import -- Set as default gallery (in side bar, by clicking pin icon) -- Reuse components in Team Builder. 
Team Builder -> Sidebar -> From Gallery +- Create new galleries via Gallery -> New Gallery +- Edit gallery JSON as needed +- Set a **default** gallery (click pin icon in sidebar) to make components available in Team Builder -### Using AutoGen Studio Teams in a Python Application +## Interactively Running Teams -An exported team can be easily integrated into any Python application using the `TeamManager` class with just two lines of code. Underneath, the `TeamManager` rehydrates the team specification into AutoGen AgentChat agents that are subsequently used to address tasks. +The AutoGen Studio Playground enables users to: -```python +- Test teams on specific tasks +- Review generated artifacts (images, code, text) +- Monitor team "inner monologue" during task execution +- View performance metrics (turn count, token usage) +- Track agent actions (tool usage, code execution results) -from autogenstudio.teammanager import TeamManager +## Importing and Reusing Team Configurations -tm = TeamManager() -result_stream = tm.run(task="What is the weather in New York?", team_config="team.json") # or wm.run_stream(..) +AutoGen Studio's Gallery view offers a default component collection and supports importing external configurations: -``` +- Create/Import galleries through Gallery -> New Gallery -> Import +- Set default galleries via sidebar pin icon +- Access components in Team Builder through Sidebar -> From Gallery -To export a team configuration, click on the export button in the Team Builder interface. This will generate a JSON file that can be used to rehydrate the team in a Python application. +### Python Integration - +To export team configurations, use the export button in Team Builder to generate a JSON file for Python application use. 
diff --git a/python/packages/autogen-studio/README.md b/python/packages/autogen-studio/README.md index 007210350829..e75d6d3d4309 100644 --- a/python/packages/autogen-studio/README.md +++ b/python/packages/autogen-studio/README.md @@ -1,9 +1,9 @@ # AutoGen Studio [![PyPI version](https://badge.fury.io/py/autogenstudio.svg)](https://badge.fury.io/py/autogenstudio) -[![Downloads](https://static.pepy.tech/badge/autogenstudio/week)](https://pepy.tech/project/autogenstudio) +![PyPI - Downloads](https://img.shields.io/pypi/dm/autogenstudio) -![ARA](./docs/ags_screen.png) +![ARA](https://media.githubusercontent.com/media/microsoft/autogen/refs/heads/main/python/packages/autogen-studio/docs/ags_screen.png) AutoGen Studio is an AutoGen-powered AI app (user interface) to help you rapidly prototype AI agents, enhance them with skills, compose them into workflows and interact with them to accomplish tasks. It is built on top of the [AutoGen](https://microsoft.github.io/autogen) framework, which is a toolkit for building AI agents. 
diff --git a/python/packages/autogen-studio/docs/ags_screen.png b/python/packages/autogen-studio/docs/ags_screen.png index 017b69aac25d..3cafcb18b933 100644 --- a/python/packages/autogen-studio/docs/ags_screen.png +++ b/python/packages/autogen-studio/docs/ags_screen.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54473a4fbfcded2b3e008b448c00117e801462cc7687b0bc14a1c22c92dbdb97 -size 621469 +oid sha256:876389d20f68c9c6e230563a145f8e10c6870bf8633163f0a6fe1f5db8d8ffe8 +size 195570 diff --git a/python/packages/autogen-studio/pyproject.toml b/python/packages/autogen-studio/pyproject.toml index 5fa6676198a6..869bdb78b8f6 100644 --- a/python/packages/autogen-studio/pyproject.toml +++ b/python/packages/autogen-studio/pyproject.toml @@ -35,7 +35,8 @@ dependencies = [ "pyyaml", "autogen-core==0.4.0", "autogen-agentchat==0.4.0", - "autogen-ext[magentic-one]==0.4.0" + "autogen-ext[magentic-one]==0.4.0", + "azure-identity", ] optional-dependencies = {web = ["fastapi", "uvicorn"], database = ["psycopg"]} diff --git a/python/uv.lock b/python/uv.lock index 219b5176d2d1..95db2f3c8185 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -660,6 +660,7 @@ dependencies = [ { name = "autogen-agentchat" }, { name = "autogen-core" }, { name = "autogen-ext", extra = ["magentic-one"] }, + { name = "azure-identity" }, { name = "fastapi" }, { name = "loguru" }, { name = "numpy" }, @@ -690,6 +691,7 @@ requires-dist = [ { name = "autogen-agentchat", editable = "packages/autogen-agentchat" }, { name = "autogen-core", editable = "packages/autogen-core" }, { name = "autogen-ext", extras = ["magentic-one"], editable = "packages/autogen-ext" }, + { name = "azure-identity" }, { name = "fastapi" }, { name = "fastapi", marker = "extra == 'web'" }, { name = "loguru" }, From 5e2a69a3038a81140944864105146727394a5816 Mon Sep 17 00:00:00 2001 From: Ranuga <79456372+Programmer-RD-AI@users.noreply.github.com> Date: Sun, 12 Jan 2025 14:34:30 +0530 Subject: [PATCH 61/61] Fix: Properly 
await `agent.run()` in README `Hello World` example (#5013) * Fix: Properly await `agent.run()` in README `Hello World` example - Updated the `Hello World` code sample to use `asyncio` for proper coroutine handling. - Resolved `RuntimeWarning` caused by not awaiting the `agent.run()` method. - Ensures the example executes correctly without errors or warnings. * Add await to agent.run call in index.md * Fix hello world --------- Co-authored-by: Eric Zhu --- README.md | 2 +- python/packages/autogen-core/docs/src/index.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8760e8caf96e..c0994b24d68f 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o")) - print(agent.run(task="Say 'Hello World!'")) + print(await agent.run(task="Say 'Hello World!'")) asyncio.run(main()) ``` diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index e62b398dce58..85f06bcdd3d4 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -119,7 +119,7 @@ from autogen_ext.models.openai import OpenAIChatCompletionClient async def main() -> None: agent = AssistantAgent("assistant", OpenAIChatCompletionClient(model="gpt-4o")) - print(agent.run(task="Say 'Hello World!'")) + print(await agent.run(task="Say 'Hello World!'")) asyncio.run(main()) ```