Skip to content

Commit

Permalink
linting
Browse files Browse the repository at this point in the history
  • Loading branch information
lpinheiroms committed Dec 10, 2024
1 parent 838c8a1 commit 458c262
Show file tree
Hide file tree
Showing 6 changed files with 628 additions and 642 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -65,8 +65,8 @@
"import os\n",
"from typing import Optional\n",
"\n",
"from autogen_ext.models.openai import AzureOpenAIChatCompletionClient\n",
"from autogen_core.models import UserMessage\n",
"from autogen_ext.models.openai import AzureOpenAIChatCompletionClient\n",
"\n",
"\n",
"# Function to get environment variable and ensure it is not None\n",
Expand Down

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -333,13 +333,8 @@
"from dataclasses import dataclass\n",
"\n",
"from autogen_core import MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n",
"from autogen_core.models import ChatCompletionClient, SystemMessage, UserMessage\n",
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
"\n",
"\n",
"@dataclass\n",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,6 @@
TopLogprob,
UserMessage,
)
from autogen_core.components.tools import Tool, ToolSchema
from autogen_core.logging import LLMCallEvent
from pydantic import BaseModel
from typing_extensions import Unpack

Expand Down Expand Up @@ -893,64 +891,64 @@ def capabilities(self) -> ModelCapabilities:
class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient):
"""Chat completion client for OpenAI hosted models.
You can also use this client for OpenAI-compatible ChatCompletion endpoints.
**Using this client for non-OpenAI models is not tested or guaranteed.**
You can also use this client for OpenAI-compatible ChatCompletion endpoints.
**Using this client for non-OpenAI models is not tested or guaranteed.**
For non-OpenAI models, please first take a look at our `community extensions <https://microsoft.github.io/autogen/dev/user-guide/extensions-user-guide/index.html>`_
for additional model clients.
For non-OpenAI models, please first take a look at our `community extensions <https://microsoft.github.io/autogen/dev/user-guide/extensions-user-guide/index.html>`_
for additional model clients.
Args:
model (str): The model to use. **Required.**
api_key (str): The API key to use. **Required if 'OPENAI_API_KEY' is not found in the environment variables.**
timeout (optional, int): The timeout for the request in seconds.
max_retries (optional, int): The maximum number of retries to attempt.
organization_id (optional, str): The organization ID to use.
base_url (optional, str): The base URL to use. **Required if the model is not hosted on OpenAI.**
model_capabilities (optional, ModelCapabilities): The capabilities of the model. **Required if the model name is not a valid OpenAI model.**
Args:
model (str): The model to use. **Required.**
api_key (str): The API key to use. **Required if 'OPENAI_API_KEY' is not found in the environment variables.**
timeout (optional, int): The timeout for the request in seconds.
max_retries (optional, int): The maximum number of retries to attempt.
organization_id (optional, str): The organization ID to use.
base_url (optional, str): The base URL to use. **Required if the model is not hosted on OpenAI.**
model_capabilities (optional, ModelCapabilities): The capabilities of the model. **Required if the model name is not a valid OpenAI model.**
To use this client, you must install the `openai` extension:
To use this client, you must install the `openai` extension:
.. code-block:: bash
.. code-block:: bash
pip install 'autogen-ext[openai]==0.4.0.dev8'
pip install 'autogen-ext[openai]==0.4.0.dev8'
The following code snippet shows how to use the client with an OpenAI model:
The following code snippet shows how to use the client with an OpenAI model:
.. code-block:: python
.. code-block:: python
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_core.models import UserMessage
openai_client = OpenAIChatCompletionClient(
model="gpt-4o-2024-08-06",
# api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set.
)
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_core.models import UserMessage
result = await openai_client.create([UserMessage(content="What is the capital of France?", source="user")]) # type: ignore
print(result)
openai_client = OpenAIChatCompletionClient(
model="gpt-4o-2024-08-06",
# api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set.
)
result = await openai_client.create([UserMessage(content="What is the capital of France?", source="user")]) # type: ignore
print(result)
To use the client with a non-OpenAI model, you need to provide the base URL of the model and the model capabilities:
.. code-block:: python
To use the client with a non-OpenAI model, you need to provide the base URL of the model and the model capabilities:
from autogen_ext.models.openai import OpenAIChatCompletionClient
custom_model_client = OpenAIChatCompletionClient(
model="custom-model-name",
base_url="https://custom-model.com/reset/of/the/path",
api_key="placeholder",
model_capabilities={
"vision": True,
"function_calling": True,
"json_output": True,
},
)
.. code-block:: python
from autogen_ext.models.openai import OpenAIChatCompletionClient
custom_model_client = OpenAIChatCompletionClient(
model="custom-model-name",
base_url="https://custom-model.com/reset/of/the/path",
api_key="placeholder",
model_capabilities={
"vision": True,
"function_calling": True,
"json_output": True,
},
)
"""

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,97 +18,97 @@

class ReplayChatCompletionClient:
"""
A mock chat completion client that replays predefined responses using an index-based approach.
A mock chat completion client that replays predefined responses using an index-based approach.
This class simulates a chat completion client by replaying a predefined list of responses. It supports both single completion and streaming responses. The responses can be either strings or CreateResult objects. The client now uses an index-based approach to access the responses, allowing for resetting the state.
This class simulates a chat completion client by replaying a predefined list of responses. It supports both single completion and streaming responses. The responses can be either strings or CreateResult objects. The client now uses an index-based approach to access the responses, allowing for resetting the state.
.. note::
The responses can be either strings or CreateResult objects.
.. note::
The responses can be either strings or CreateResult objects.
Args:
chat_completions (Sequence[Union[str, CreateResult]]): A list of predefined responses to replay.
Args:
chat_completions (Sequence[Union[str, CreateResult]]): A list of predefined responses to replay.
Raises:
ValueError("No more mock responses available"): If the list of provided outputs are exhausted.
Raises:
ValueError("No more mock responses available"): If the list of provided outputs are exhausted.
Examples:
Examples:
Simple chat completion client to return pre-defined responses.
Simple chat completion client to return pre-defined responses.
.. code-block:: python
.. code-block:: python
from autogen_ext.models.reply import ReplayChatCompletionClient
from autogen_core.models import UserMessage
from autogen_ext.models.reply import ReplayChatCompletionClient
from autogen_core.models import UserMessage
async def example():
chat_completions = [
"Hello, how can I assist you today?",
"I'm happy to help with any questions you have.",
"Is there anything else I can assist you with?",
]
client = ReplayChatCompletionClient(chat_completions)
messages = [UserMessage(content="What can you do?", source="user")]
response = await client.create(messages)
print(response.content) # Output: "Hello, how can I assist you today?"
async def example():
chat_completions = [
"Hello, how can I assist you today?",
"I'm happy to help with any questions you have.",
"Is there anything else I can assist you with?",
]
client = ReplayChatCompletionClient(chat_completions)
messages = [UserMessage(content="What can you do?", source="user")]
response = await client.create(messages)
print(response.content) # Output: "Hello, how can I assist you today?"
Simple streaming chat completion client to return pre-defined responses
Simple streaming chat completion client to return pre-defined responses
.. code-block:: python
.. code-block:: python
import asyncio
from autogen_ext.models.reply import ReplayChatCompletionClient
from autogen_core.models import UserMessage
import asyncio
from autogen_ext.models.reply import ReplayChatCompletionClient
from autogen_core.models import UserMessage
async def example():
chat_completions = [
"Hello, how can I assist you today?",
"I'm happy to help with any questions you have.",
"Is there anything else I can assist you with?",
]
client = ReplayChatCompletionClient(chat_completions)
messages = [UserMessage(content="What can you do?", source="user")]
async def example():
chat_completions = [
"Hello, how can I assist you today?",
"I'm happy to help with any questions you have.",
"Is there anything else I can assist you with?",
]
client = ReplayChatCompletionClient(chat_completions)
messages = [UserMessage(content="What can you do?", source="user")]
async for token in client.create_stream(messages):
print(token, end="") # Output: "Hello, how can I assist you today?"
async for token in client.create_stream(messages):
print(token, end="") # Output: "Hello, how can I assist you today?"
async for token in client.create_stream(messages):
print(token, end="") # Output: "I'm happy to help with any questions you have."
async for token in client.create_stream(messages):
print(token, end="") # Output: "I'm happy to help with any questions you have."
asyncio.run(example())
asyncio.run(example())
Using `.reset` to reset the chat client state
Using `.reset` to reset the chat client state
.. code-block:: python
.. code-block:: python
import asyncio
from autogen_ext.models import ReplayChatCompletionClient
from autogen_core.models import UserMessage
import asyncio
from autogen_ext.models import ReplayChatCompletionClient
from autogen_core.models import UserMessage
async def example():
chat_completions = [
"Hello, how can I assist you today?",
]
client = ReplayChatCompletionClient(chat_completions)
messages = [UserMessage(content="What can you do?", source="user")]
response = await client.create(messages)
print(response.content) # Output: "Hello, how can I assist you today?"
async def example():
chat_completions = [
"Hello, how can I assist you today?",
]
client = ReplayChatCompletionClient(chat_completions)
messages = [UserMessage(content="What can you do?", source="user")]
response = await client.create(messages)
print(response.content) # Output: "Hello, how can I assist you today?"
response = await client.create(messages) # Raises ValueError("No more mock responses available")
response = await client.create(messages) # Raises ValueError("No more mock responses available")
client.reset() # Reset the client state (current index of message and token usages)
response = await client.create(messages)
print(response.content) # Output: "Hello, how can I assist you today?" again
client.reset() # Reset the client state (current index of message and token usages)
response = await client.create(messages)
print(response.content) # Output: "Hello, how can I assist you today?" again
asyncio.run(example())
asyncio.run(example())
"""

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,9 @@
SystemMessage,
UserMessage,
)
from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient
from autogen_ext.models.openai._model_info import resolve_model
from autogen_ext.models.openai._openai_client import calculate_vision_tokens, convert_tools
from openai.resources.chat.completions import AsyncCompletions
from openai.types.chat.chat_completion import ChatCompletion, Choice
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta
Expand Down

0 comments on commit 458c262

Please sign in to comment.