|
18 | 18 |
|
19 | 19 | class ReplayChatCompletionClient:
|
20 | 20 | """
|
21 |
| - A mock chat completion client that replays predefined responses using an index-based approach. |
| 21 | + A mock chat completion client that replays predefined responses using an index-based approach. |
22 | 22 |
|
23 |
| - This class simulates a chat completion client by replaying a predefined list of responses. It supports both single completion and streaming responses. The responses can be either strings or CreateResult objects. The client now uses an index-based approach to access the responses, allowing for resetting the state. |
| 23 | + This class simulates a chat completion client by replaying a predefined list of responses. It supports both single completion and streaming responses. The responses can be either strings or CreateResult objects. The client now uses an index-based approach to access the responses, allowing for resetting the state. |
24 | 24 |
|
25 |
| - .. note:: |
26 |
| - The responses can be either strings or CreateResult objects. |
| 25 | + .. note:: |
| 26 | + The responses can be either strings or CreateResult objects. |
27 | 27 |
|
28 |
| - Args: |
29 |
| - chat_completions (Sequence[Union[str, CreateResult]]): A list of predefined responses to replay. |
| 28 | + Args: |
| 29 | + chat_completions (Sequence[Union[str, CreateResult]]): A list of predefined responses to replay. |
30 | 30 |
|
31 |
| - Raises: |
32 |
| - ValueError("No more mock responses available"): If the list of provided outputs are exhausted. |
| 31 | + Raises: |
| 32 | + ValueError("No more mock responses available"): If the list of provided outputs is exhausted. |
33 | 33 |
|
34 |
| - Examples: |
| 34 | + Examples: |
35 | 35 |
|
36 |
| - Simple chat completion client to return pre-defined responses. |
| 36 | + Simple chat completion client to return pre-defined responses. |
37 | 37 |
|
38 |
| - .. code-block:: python |
| 38 | + .. code-block:: python |
39 | 39 |
|
40 |
| - from autogen_ext.models.reply import ReplayChatCompletionClient |
41 |
| - from autogen_core.models import UserMessage |
| 40 | + from autogen_ext.models.reply import ReplayChatCompletionClient |
| 41 | + from autogen_core.models import UserMessage |
42 | 42 |
|
43 | 43 |
|
44 |
| - async def example(): |
45 |
| - chat_completions = [ |
46 |
| - "Hello, how can I assist you today?", |
47 |
| - "I'm happy to help with any questions you have.", |
48 |
| - "Is there anything else I can assist you with?", |
49 |
| - ] |
50 |
| - client = ReplayChatCompletionClient(chat_completions) |
51 |
| - messages = [UserMessage(content="What can you do?", source="user")] |
52 |
| - response = await client.create(messages) |
53 |
| - print(response.content) # Output: "Hello, how can I assist you today?" |
| 44 | + async def example(): |
| 45 | + chat_completions = [ |
| 46 | + "Hello, how can I assist you today?", |
| 47 | + "I'm happy to help with any questions you have.", |
| 48 | + "Is there anything else I can assist you with?", |
| 49 | + ] |
| 50 | + client = ReplayChatCompletionClient(chat_completions) |
| 51 | + messages = [UserMessage(content="What can you do?", source="user")] |
| 52 | + response = await client.create(messages) |
| 53 | + print(response.content) # Output: "Hello, how can I assist you today?" |
54 | 54 |
|
55 |
| - Simple streaming chat completion client to return pre-defined responses |
| 55 | + Simple streaming chat completion client to return pre-defined responses |
56 | 56 |
|
57 |
| - .. code-block:: python |
| 57 | + .. code-block:: python |
58 | 58 |
|
59 |
| - import asyncio |
60 |
| - <<<<<<< HEAD:python/packages/autogen-ext/src/autogen_ext/models/reply/_reply_chat_completion_client.py |
61 |
| - from autogen_ext.models.reply import ReplayChatCompletionClient |
62 |
| - from autogen_core.components.models import UserMessage |
63 |
| - ======= |
64 |
| - from autogen_ext.models import ReplayChatCompletionClient |
65 |
| - from autogen_core.models import UserMessage |
66 |
| - >>>>>>> main:python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py |
| 59 | + import asyncio |
| 60 | + from autogen_ext.models.reply import ReplayChatCompletionClient |
| 61 | + from autogen_core.models import UserMessage |
67 | 62 |
|
68 | 63 |
|
69 |
| - async def example(): |
70 |
| - chat_completions = [ |
71 |
| - "Hello, how can I assist you today?", |
72 |
| - "I'm happy to help with any questions you have.", |
73 |
| - "Is there anything else I can assist you with?", |
74 |
| - ] |
75 |
| - client = ReplayChatCompletionClient(chat_completions) |
76 |
| - messages = [UserMessage(content="What can you do?", source="user")] |
| 64 | + async def example(): |
| 65 | + chat_completions = [ |
| 66 | + "Hello, how can I assist you today?", |
| 67 | + "I'm happy to help with any questions you have.", |
| 68 | + "Is there anything else I can assist you with?", |
| 69 | + ] |
| 70 | + client = ReplayChatCompletionClient(chat_completions) |
| 71 | + messages = [UserMessage(content="What can you do?", source="user")] |
77 | 72 |
|
78 |
| - async for token in client.create_stream(messages): |
79 |
| - print(token, end="") # Output: "Hello, how can I assist you today?" |
| 73 | + async for token in client.create_stream(messages): |
| 74 | + print(token, end="") # Output: "Hello, how can I assist you today?" |
80 | 75 |
|
81 |
| - async for token in client.create_stream(messages): |
82 |
| - print(token, end="") # Output: "I'm happy to help with any questions you have." |
| 76 | + async for token in client.create_stream(messages): |
| 77 | + print(token, end="") # Output: "I'm happy to help with any questions you have." |
83 | 78 |
|
84 |
| - asyncio.run(example()) |
| 79 | + asyncio.run(example()) |
85 | 80 |
|
86 |
| - Using `.reset` to reset the chat client state |
| 81 | + Using `.reset` to reset the chat client state |
87 | 82 |
|
88 |
| - .. code-block:: python |
| 83 | + .. code-block:: python |
89 | 84 |
|
90 |
| - import asyncio |
91 |
| - from autogen_ext.models import ReplayChatCompletionClient |
92 |
| - from autogen_core.models import UserMessage |
| 85 | + import asyncio |
| 86 | + from autogen_ext.models.reply import ReplayChatCompletionClient |
| 87 | + from autogen_core.models import UserMessage |
93 | 88 |
|
94 | 89 |
|
95 |
| - async def example(): |
96 |
| - chat_completions = [ |
97 |
| - "Hello, how can I assist you today?", |
98 |
| - ] |
99 |
| - client = ReplayChatCompletionClient(chat_completions) |
100 |
| - messages = [UserMessage(content="What can you do?", source="user")] |
101 |
| - response = await client.create(messages) |
102 |
| - print(response.content) # Output: "Hello, how can I assist you today?" |
| 90 | + async def example(): |
| 91 | + chat_completions = [ |
| 92 | + "Hello, how can I assist you today?", |
| 93 | + ] |
| 94 | + client = ReplayChatCompletionClient(chat_completions) |
| 95 | + messages = [UserMessage(content="What can you do?", source="user")] |
| 96 | + response = await client.create(messages) |
| 97 | + print(response.content) # Output: "Hello, how can I assist you today?" |
103 | 98 |
|
104 |
| - response = await client.create(messages) # Raises ValueError("No more mock responses available") |
| 99 | + response = await client.create(messages) # Raises ValueError("No more mock responses available") |
105 | 100 |
|
106 |
| - client.reset() # Reset the client state (current index of message and token usages) |
107 |
| - response = await client.create(messages) |
108 |
| - print(response.content) # Output: "Hello, how can I assist you today?" again |
| 101 | + client.reset() # Reset the client state (current index of message and token usages) |
| 102 | + response = await client.create(messages) |
| 103 | + print(response.content) # Output: "Hello, how can I assist you today?" again |
109 | 104 |
|
110 | 105 |
|
111 |
| - asyncio.run(example()) |
| 106 | + asyncio.run(example()) |
112 | 107 |
|
113 | 108 | """
|
114 | 109 |
|
|
0 commit comments