
Commit eee3d65

Commit message: fix missing conflicts
Parent: 458c262
File tree: 3 files changed (+541, -1042 lines)
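The commit removes Git merge-conflict markers that had been committed inside two docstrings (see the diffs below). As a side note, a minimal check for such leftovers, sketched here with a hypothetical find_conflict_markers helper and only the Python standard library (not part of this commit), might look like this:

# Hypothetical helper (not part of this commit): scan *.py files for
# leftover Git merge-conflict markers before committing.
import sys
from pathlib import Path


def find_conflict_markers(root: str = ".") -> list[tuple[str, int, str]]:
    hits: list[tuple[str, int, str]] = []
    for path in Path(root).rglob("*.py"):
        text = path.read_text(encoding="utf-8", errors="ignore")
        for lineno, line in enumerate(text.splitlines(), start=1):
            stripped = line.lstrip()
            # Git writes the markers at the start of a line; lstrip() keeps
            # the check robust if the file was re-indented afterwards.
            if stripped.startswith(("<<<<<<< ", ">>>>>>> ")) or stripped == "=======":
                hits.append((str(path), lineno, line.rstrip()))
    return hits


if __name__ == "__main__":
    markers = find_conflict_markers(sys.argv[1] if len(sys.argv) > 1 else ".")
    for path, lineno, line in markers:
        print(f"{path}:{lineno}: {line}")
    sys.exit(1 if markers else 0)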

python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
Lines changed: 39 additions & 44 deletions

@@ -891,64 +891,59 @@ def capabilities(self) -> ModelCapabilities:
 class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient):
 """Chat completion client for OpenAI hosted models.

-You can also use this client for OpenAI-compatible ChatCompletion endpoints.
-**Using this client for non-OpenAI models is not tested or guaranteed.**
+You can also use this client for OpenAI-compatible ChatCompletion endpoints.
+**Using this client for non-OpenAI models is not tested or guaranteed.**

-For non-OpenAI models, please first take a look at our `community extensions <https://microsoft.github.io/autogen/dev/user-guide/extensions-user-guide/index.html>`_
-for additional model clients.
+For non-OpenAI models, please first take a look at our `community extensions <https://microsoft.github.io/autogen/dev/user-guide/extensions-user-guide/index.html>`_
+for additional model clients.

-Args:
-model (str): The model to use. **Required.**
-api_key (str): The API key to use. **Required if 'OPENAI_API_KEY' is not found in the environment variables.**
-timeout (optional, int): The timeout for the request in seconds.
-max_retries (optional, int): The maximum number of retries to attempt.
-organization_id (optional, str): The organization ID to use.
-base_url (optional, str): The base URL to use. **Required if the model is not hosted on OpenAI.**
-model_capabilities (optional, ModelCapabilities): The capabilities of the model. **Required if the model name is not a valid OpenAI model.**
-
-To use this client, you must install the `openai` extension:
+Args:
+model (str): The model to use. **Required.**
+api_key (str): The API key to use. **Required if 'OPENAI_API_KEY' is not found in the environment variables.**
+timeout (optional, int): The timeout for the request in seconds.
+max_retries (optional, int): The maximum number of retries to attempt.
+organization_id (optional, str): The organization ID to use.
+base_url (optional, str): The base URL to use. **Required if the model is not hosted on OpenAI.**
+model_capabilities (optional, ModelCapabilities): The capabilities of the model. **Required if the model name is not a valid OpenAI model.**

-.. code-block:: bash
+To use this client, you must install the `openai` extension:

-pip install 'autogen-ext[openai]==0.4.0.dev8'
+.. code-block:: bash

-The following code snippet shows how to use the client with an OpenAI model:
+pip install 'autogen-ext[openai]==0.4.0.dev8'

-.. code-block:: python
+The following code snippet shows how to use the client with an OpenAI model:

-<<<<<<< HEAD:python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
-from autogen_ext.models.openai import OpenAIChatCompletionClient
-from autogen_core.components.models import UserMessage
-=======
-from autogen_ext.models import OpenAIChatCompletionClient
-from autogen_core.models import UserMessage
->>>>>>> main:python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py
+.. code-block:: python

-openai_client = OpenAIChatCompletionClient(
-model="gpt-4o-2024-08-06",
-# api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set.
-)
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from autogen_core.models import UserMessage

-result = await openai_client.create([UserMessage(content="What is the capital of France?", source="user")]) # type: ignore
-print(result)
+openai_client = OpenAIChatCompletionClient(
+model="gpt-4o-2024-08-06",
+# api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set.
+)

+result = await openai_client.create([UserMessage(content="What is the capital of France?", source="user")]) # type: ignore
+print(result)

-To use the client with a non-OpenAI model, you need to provide the base URL of the model and the model capabilities:

-.. code-block:: python
+To use the client with a non-OpenAI model, you need to provide the base URL of the model and the model capabilities:

-from autogen_ext.models.openai import OpenAIChatCompletionClient
+.. code-block:: python

-custom_model_client = OpenAIChatCompletionClient(
-model="custom-model-name",
-base_url="https://custom-model.com/reset/of/the/path",
-api_key="placeholder",
-model_capabilities={
-"vision": True,
-"function_calling": True,
-"json_output": True,
-},
-)
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+
+custom_model_client = OpenAIChatCompletionClient(
+model="custom-model-name",
+base_url="https://custom-model.com/reset/of/the/path",
+api_key="placeholder",
+model_capabilities={
+"vision": True,
+"function_calling": True,
+"json_output": True,
+},
+)

 """

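The resolved docstring snippets above use `await` at the top level, as Sphinx examples often do. A minimal runnable sketch of the first example, assuming the `autogen-ext[openai]==0.4.0.dev8` API shown in the docstring and an `OPENAI_API_KEY` in the environment, might be:

# Runnable sketch of the resolved docstring example (assumes
# autogen-ext[openai]==0.4.0.dev8 and OPENAI_API_KEY in the environment).
import asyncio

from autogen_core.models import UserMessage
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    openai_client = OpenAIChatCompletionClient(
        model="gpt-4o-2024-08-06",
        # api_key="sk-...",  # Optional if OPENAI_API_KEY is set.
    )
    result = await openai_client.create(
        [UserMessage(content="What is the capital of France?", source="user")]
    )
    print(result)


asyncio.run(main())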

python/packages/autogen-ext/src/autogen_ext/models/reply/_reply_chat_completion_client.py
Lines changed: 59 additions & 64 deletions

@@ -18,97 +18,92 @@

 class ReplayChatCompletionClient:
 """
-A mock chat completion client that replays predefined responses using an index-based approach.
+A mock chat completion client that replays predefined responses using an index-based approach.

-This class simulates a chat completion client by replaying a predefined list of responses. It supports both single completion and streaming responses. The responses can be either strings or CreateResult objects. The client now uses an index-based approach to access the responses, allowing for resetting the state.
+This class simulates a chat completion client by replaying a predefined list of responses. It supports both single completion and streaming responses. The responses can be either strings or CreateResult objects. The client now uses an index-based approach to access the responses, allowing for resetting the state.

-.. note::
-The responses can be either strings or CreateResult objects.
+.. note::
+The responses can be either strings or CreateResult objects.

-Args:
-chat_completions (Sequence[Union[str, CreateResult]]): A list of predefined responses to replay.
+Args:
+chat_completions (Sequence[Union[str, CreateResult]]): A list of predefined responses to replay.

-Raises:
-ValueError("No more mock responses available"): If the list of provided outputs are exhausted.
+Raises:
+ValueError("No more mock responses available"): If the list of provided outputs are exhausted.

-Examples:
+Examples:

-Simple chat completion client to return pre-defined responses.
+Simple chat completion client to return pre-defined responses.

-.. code-block:: python
+.. code-block:: python

-from autogen_ext.models.reply import ReplayChatCompletionClient
-from autogen_core.models import UserMessage
+from autogen_ext.models.reply import ReplayChatCompletionClient
+from autogen_core.models import UserMessage


-async def example():
-chat_completions = [
-"Hello, how can I assist you today?",
-"I'm happy to help with any questions you have.",
-"Is there anything else I can assist you with?",
-]
-client = ReplayChatCompletionClient(chat_completions)
-messages = [UserMessage(content="What can you do?", source="user")]
-response = await client.create(messages)
-print(response.content) # Output: "Hello, how can I assist you today?"
+async def example():
+chat_completions = [
+"Hello, how can I assist you today?",
+"I'm happy to help with any questions you have.",
+"Is there anything else I can assist you with?",
+]
+client = ReplayChatCompletionClient(chat_completions)
+messages = [UserMessage(content="What can you do?", source="user")]
+response = await client.create(messages)
+print(response.content) # Output: "Hello, how can I assist you today?"

-Simple streaming chat completion client to return pre-defined responses
+Simple streaming chat completion client to return pre-defined responses

-.. code-block:: python
+.. code-block:: python

-import asyncio
-<<<<<<< HEAD:python/packages/autogen-ext/src/autogen_ext/models/reply/_reply_chat_completion_client.py
-from autogen_ext.models.reply import ReplayChatCompletionClient
-from autogen_core.components.models import UserMessage
-=======
-from autogen_ext.models import ReplayChatCompletionClient
-from autogen_core.models import UserMessage
->>>>>>> main:python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py
+import asyncio
+from autogen_ext.models.reply import ReplayChatCompletionClient
+from autogen_core.models import UserMessage


-async def example():
-chat_completions = [
-"Hello, how can I assist you today?",
-"I'm happy to help with any questions you have.",
-"Is there anything else I can assist you with?",
-]
-client = ReplayChatCompletionClient(chat_completions)
-messages = [UserMessage(content="What can you do?", source="user")]
+async def example():
+chat_completions = [
+"Hello, how can I assist you today?",
+"I'm happy to help with any questions you have.",
+"Is there anything else I can assist you with?",
+]
+client = ReplayChatCompletionClient(chat_completions)
+messages = [UserMessage(content="What can you do?", source="user")]

-async for token in client.create_stream(messages):
-print(token, end="") # Output: "Hello, how can I assist you today?"
+async for token in client.create_stream(messages):
+print(token, end="") # Output: "Hello, how can I assist you today?"

-async for token in client.create_stream(messages):
-print(token, end="") # Output: "I'm happy to help with any questions you have."
+async for token in client.create_stream(messages):
+print(token, end="") # Output: "I'm happy to help with any questions you have."

-asyncio.run(example())
+asyncio.run(example())

-Using `.reset` to reset the chat client state
+Using `.reset` to reset the chat client state

-.. code-block:: python
+.. code-block:: python

-import asyncio
-from autogen_ext.models import ReplayChatCompletionClient
-from autogen_core.models import UserMessage
+import asyncio
+from autogen_ext.models import ReplayChatCompletionClient
+from autogen_core.models import UserMessage


-async def example():
-chat_completions = [
-"Hello, how can I assist you today?",
-]
-client = ReplayChatCompletionClient(chat_completions)
-messages = [UserMessage(content="What can you do?", source="user")]
-response = await client.create(messages)
-print(response.content) # Output: "Hello, how can I assist you today?"
+async def example():
+chat_completions = [
+"Hello, how can I assist you today?",
+]
+client = ReplayChatCompletionClient(chat_completions)
+messages = [UserMessage(content="What can you do?", source="user")]
+response = await client.create(messages)
+print(response.content) # Output: "Hello, how can I assist you today?"

-response = await client.create(messages) # Raises ValueError("No more mock responses available")
+response = await client.create(messages) # Raises ValueError("No more mock responses available")

-client.reset() # Reset the client state (current index of message and token usages)
-response = await client.create(messages)
-print(response.content) # Output: "Hello, how can I assist you today?" again
+client.reset() # Reset the client state (current index of message and token usages)
+response = await client.create(messages)
+print(response.content) # Output: "Hello, how can I assist you today?" again


-asyncio.run(example())
+asyncio.run(example())

 """

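The docstring describes the index-based replay mechanism in prose only; the diff does not include the implementation. A rough, self-contained sketch of that idea (an illustration, not the actual ReplayChatCompletionClient code from autogen-ext) could look like:

# Illustration of an index-based replay mock; not the actual
# ReplayChatCompletionClient implementation.
from typing import AsyncIterator, Sequence


class MiniReplayClient:
    def __init__(self, chat_completions: Sequence[str]) -> None:
        self._chat_completions = list(chat_completions)
        self._index = 0  # position of the next canned response

    async def create(self, messages: object) -> str:
        # Return the next canned response and advance the index.
        if self._index >= len(self._chat_completions):
            raise ValueError("No more mock responses available")
        response = self._chat_completions[self._index]
        self._index += 1
        return response

    async def create_stream(self, messages: object) -> AsyncIterator[str]:
        # Stream the next canned response word by word.
        for token in (await self.create(messages)).split(" "):
            yield token + " "

    def reset(self) -> None:
        # Rewind the index so the same responses replay from the start.
        self._index = 0

As in the docstring examples, create raises once the list is exhausted, and reset() rewinds the index so the responses replay from the beginning.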
