Commit 212680c

Gorilla functions and document examples testing
1 parent bf11b23 commit 212680c

8 files changed: +358 -1 lines
@@ -0,0 +1,62 @@
import autogen

# Update with your Inference Endpoint and Access Token
hf_inference_endpoint_url = "https://inowtlifsx7oieb7.us-east-1.aws.endpoints.huggingface.cloud/v1/"
hf_api_key = ''

llm_config = {
    "config_list": [
        {
            # Available together.ai model strings:
            # https://docs.together.ai/docs/inference-models
            # "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "model": "tgi",  # "mistralai/Mistral-7B-Instruct-v0.2",
            "api_key": hf_api_key,
            "base_url": hf_inference_endpoint_url
        }
    ],
    "cache_seed": None
}

# User Proxy will execute code and finish the chat upon typing 'exit'
user_proxy = autogen.UserProxyAgent(
    name="UserProxy",
    system_message="A human admin",
    code_execution_config={
        "last_n_messages": 2,
        "work_dir": "groupchat",
        "use_docker": False,
    },  # Please set use_docker=True if docker is available; running the generated code in docker is safer than running it directly.
    human_input_mode="TERMINATE",
    is_termination_msg=lambda x: x.get("content", "") and "TERMINATE" in x.get("content", ""),
)

# Python Coder agent
coder = autogen.AssistantAgent(
    name="softwareCoder",
    description="Software Coder, writes Python code as required and iterates on feedback from the Code Reviewer.",
    system_message="You are a senior Python developer, a specialist in writing succinct Python functions.",
    llm_config=llm_config,
)

# Code Reviewer agent
reviewer = autogen.AssistantAgent(
    name="codeReviewer",
    description="Code Reviewer, reviews written code for correctness, efficiency, and security. Asks the Software Coder to address issues.",
    system_message="You are a Code Reviewer, experienced in checking code for correctness, efficiency, and security. Review and provide feedback to the Software Coder until you are satisfied, then return the word TERMINATE",
    is_termination_msg=lambda x: x.get("content", "") and "TERMINATE" in x.get("content", ""),
    llm_config=llm_config,
)

# Establish the Group Chat and disallow a speaker being selected consecutively
groupchat = autogen.GroupChat(agents=[user_proxy, coder, reviewer], messages=[], max_round=12, allow_repeat_speaker=False)

# Manages the group of multiple agents
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

# Start the chat with a request to write a function
user_proxy.initiate_chat(
    manager,
    message="Write a Python function for the Fibonacci sequence; it takes one parameter, the position in the sequence, and returns the Fibonacci number for that position."
)
# type exit to terminate the chat
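
Before launching the group chat above, it can help to confirm the Inference Endpoint answers the OpenAI-compatible chat API. This is a minimal sketch, not part of the commit, assuming the TGI endpoint is live and the openai package is installed; the URL and key are the same placeholders used above:

from openai import OpenAI

# Assumption: the TGI Inference Endpoint above is running and the token is valid.
hf_inference_endpoint_url = "https://inowtlifsx7oieb7.us-east-1.aws.endpoints.huggingface.cloud/v1/"
hf_api_key = ''

client = OpenAI(base_url=hf_inference_endpoint_url, api_key=hf_api_key)
response = client.chat.completions.create(
    model="tgi",  # TGI serves whatever model is deployed under this fixed name
    messages=[{"role": "user", "content": "Reply with the word 'ready'."}],
    max_tokens=5,
)
print(response.choices[0].message.content)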
@@ -0,0 +1,58 @@
import autogen
import os

llm_config = {
    "config_list": [
        {
            # Available together.ai model strings:
            # https://docs.together.ai/docs/inference-models
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "api_key": os.environ['TOGETHER_API_KEY'],
            "base_url": "https://api.together.xyz/v1"
        }
    ],
    "cache_seed": 42
}

# User Proxy will execute code and finish the chat upon typing 'exit'
user_proxy = autogen.UserProxyAgent(
    name="UserProxy",
    system_message="A human admin",
    code_execution_config={
        "last_n_messages": 2,
        "work_dir": "groupchat",
        "use_docker": False,
    },  # Please set use_docker=True if docker is available; running the generated code in docker is safer than running it directly.
    human_input_mode="TERMINATE",
    is_termination_msg=lambda x: x.get("content", "") and "TERMINATE" in x.get("content", ""),
)

# Python Coder agent
coder = autogen.AssistantAgent(
    name="softwareCoder",
    description="Software Coder, writes Python code as required and iterates on feedback from the Code Reviewer.",
    system_message="You are a senior Python developer, a specialist in writing succinct Python functions.",
    llm_config=llm_config,
)

# Code Reviewer agent
reviewer = autogen.AssistantAgent(
    name="codeReviewer",
    description="Code Reviewer, reviews written code for correctness, efficiency, and security. Asks the Software Coder to address issues.",
    system_message="You are a Code Reviewer, experienced in checking code for correctness, efficiency, and security. Review and provide feedback to the Software Coder until you are satisfied, then return the word TERMINATE",
    is_termination_msg=lambda x: x.get("content", "") and "TERMINATE" in x.get("content", ""),
    llm_config=llm_config,
)

# Establish the Group Chat and disallow a speaker being selected consecutively
groupchat = autogen.GroupChat(agents=[user_proxy, coder, reviewer], messages=[], max_round=12, allow_repeat_speaker=False)

# Manages the group of multiple agents
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

# Start the chat with a request to write a function
user_proxy.initiate_chat(
    manager,
    message="Write a Python function for the Fibonacci sequence; it takes one parameter, the position in the sequence, and returns the Fibonacci number for that position."
)
# type exit to terminate the chat
@@ -0,0 +1,77 @@
import os
from typing import Literal
from pydantic import BaseModel, Field
from typing_extensions import Annotated
import autogen

llm_config = {
    # Together.ai (supports function calling, requires API key in environment variable "TOGETHER_API_KEY")
    # More info here: https://docs.together.ai/docs/function-calling including the models it supports
    "config_list": [{"model": "mistralai/Mixtral-8x7B-Instruct-v0.1", "api_key": os.environ['TOGETHER_API_KEY'], "base_url": "https://api.together.xyz/v1"}],

    "cache_seed": None,
}

chatbot = autogen.AssistantAgent(
    name="chatbot",
    system_message="""For currency exchange tasks,
    only use the functions you have been provided with.
    Output 'TERMINATE' with the answer to indicate an answer has been provided.
    Do not include the function name or result in the JSON.
    Example of the return JSON is:
    {
        "parameter_1_name": 100.00,
        "parameter_2_name": "ABC",
        "parameter_3_name": "DEF",
    }.
    Another example of the return JSON is:
    {
        "parameter_1_name": "GHI",
        "parameter_2_name": "ABC",
        "parameter_3_name": "DEF",
        "parameter_4_name": 123.00,
    }.
    When finished, return a non-JSON format string of 'TERMINATE'""",
    llm_config=llm_config,
)

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    is_termination_msg=lambda x: x.get("content", "") and "TERMINATE" in x.get("content", ""),
    human_input_mode="TERMINATE",
    max_consecutive_auto_reply=1,
)

CurrencySymbol = Literal["USD", "EUR"]

def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float:
    if base_currency == quote_currency:
        return 1.0
    elif base_currency == "USD" and quote_currency == "EUR":
        return 1 / 1.1
    elif base_currency == "EUR" and quote_currency == "USD":
        return 1.1
    else:
        raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}")


@user_proxy.register_for_execution()
@chatbot.register_for_llm(description="Currency exchange calculator.")
def currency_calculator(
    base_amount: Annotated[float, "Amount of currency in base_currency"],
    base_currency: Annotated[CurrencySymbol, "Base currency"] = "USD",
    quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR",
) -> str:
    quote_amount = exchange_rate(base_currency, quote_currency) * base_amount
    return f"{format(quote_amount, '.2f')} {quote_currency}"

# Test that the function map is the function
# assert user_proxy.function_map["currency_calculator"]._origin == currency_calculator

# start the conversation
res = user_proxy.initiate_chat(
    chatbot,
    message="How much is 123.45 EUR in USD?",
    summary_method="reflection_with_llm",
)
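
To verify that the two decorators actually attached the tool, the schema generated by register_for_llm() can be printed; the commented line in the LiteLLM variant further down hints at the same check. A small sketch, not part of the commit, run after the decorated function is defined:

import json

# Show the tool schema that @chatbot.register_for_llm() added to the LLM config.
print(json.dumps(chatbot.llm_config["tools"], indent=2))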
@@ -0,0 +1,21 @@
from autogen import UserProxyAgent, ConversableAgent

local_llm_config = {
    "config_list": [
        {
            "model": "NotRequired",  # Loaded with LiteLLM command
            "api_key": "NotRequired",  # Not needed
            "base_url": "http://192.168.0.115:4000"  # Your LiteLLM URL
        }
    ],
    "cache_seed": None  # Turns off caching, useful for testing different models
}

# Create the agent that uses the LLM.
assistant = ConversableAgent("agent", llm_config=local_llm_config)

# Create the agent that represents the user in the conversation.
user_proxy = UserProxyAgent("user", code_execution_config=False)

# Let the assistant start the conversation. It will end when the user types 'exit'.
assistant.initiate_chat(user_proxy, message="How can I help you today?")
@@ -0,0 +1,79 @@
import autogen
from typing import Literal
from typing_extensions import Annotated

local_llm_config = {
    "config_list": [
        {
            "model": "NotRequired",  # Loaded with LiteLLM command
            "api_key": "NotRequired",  # Not needed
            "base_url": "http://192.168.0.115:4000"  # Your LiteLLM URL
        }
    ],
    "cache_seed": None  # Turns off caching, useful for testing different models
}

chatbot = autogen.AssistantAgent(
    name="chatbot",
    system_message="""For currency exchange tasks,
    only use the functions you have been provided with.
    Output 'TERMINATE' when an answer has been provided.
    Do not include the function name or result in the JSON.
    Example of the return JSON is:
    {
        "parameter_1_name": 100.00,
        "parameter_2_name": "ABC",
        "parameter_3_name": "DEF",
    }.
    Another example of the return JSON is:
    {
        "parameter_1_name": "GHI",
        "parameter_2_name": "ABC",
        "parameter_3_name": "DEF",
        "parameter_4_name": 123.00,
    }. """,
    llm_config=local_llm_config,
)

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    is_termination_msg=lambda x: x.get("content", "") and "TERMINATE" in x.get("content", ""),
    human_input_mode="NEVER",
    max_consecutive_auto_reply=1,
)

CurrencySymbol = Literal["USD", "EUR"]

def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float:
    if base_currency == quote_currency:
        return 1.0
    elif base_currency == "USD" and quote_currency == "EUR":
        return 1 / 1.1
    elif base_currency == "EUR" and quote_currency == "USD":
        return 1.1
    else:
        raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}")


@user_proxy.register_for_execution()
@chatbot.register_for_llm(description="Currency exchange calculator.")
def currency_calculator(
    base_amount: Annotated[float, "Amount of currency in base_currency"],
    base_currency: Annotated[CurrencySymbol, "Base currency"] = "USD",
    quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR",
) -> str:
    quote_amount = exchange_rate(base_currency, quote_currency) * base_amount
    return f"{format(quote_amount, '.2f')} {quote_currency}"

# print(chatbot.llm_config["tools"])

# Test that the function map is the function
assert user_proxy.function_map["currency_calculator"]._origin == currency_calculator

# start the conversation
res = user_proxy.initiate_chat(
    chatbot,
    message="How much is 123.45 EUR in USD?",
    summary_method="reflection_with_llm",
)
@@ -0,0 +1,36 @@
from autogen import ConversableAgent

gemma = {
    "config_list": [
        {
            "model": "lmstudio-ai/gemma-2b-it-GGUF/gemma-2b-it-q8_0.gguf",
            "base_url": "http://192.168.0.115:1234/v1",
            "api_key": "NULL",
        },
    ],
    "cache_seed": None,  # Disable caching.
}

phi2 = {
    "config_list": [
        {
            "model": "TheBloke/phi-2-GGUF/phi-2.Q4_K_S.gguf",
            "base_url": "http://192.168.0.115:1234/v1",
            "api_key": "NULL",
        },
    ],
    "cache_seed": None,  # Disable caching.
}

jack = ConversableAgent(
    "Jack (Phi-2)",
    llm_config=phi2,
    system_message="Your name is Jack and you are a comedian in a two-person comedy show.",
)
emma = ConversableAgent(
    "Emma (Gemma)",
    llm_config=gemma,
    system_message="Your name is Emma and you are a comedian in a two-person comedy show.",
)

chat_result = jack.initiate_chat(emma, message="Emma, tell me a joke.", max_turns=2)
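
Once the two-agent exchange finishes, the returned object can be inspected. A short follow-on sketch, not part of the commit, assuming autogen's ChatResult fields (chat_history, summary, cost):

# Walk the recorded conversation and print who said what.
for message in chat_result.chat_history:
    print(f"{message.get('name', message.get('role'))}: {message.get('content')}")

# Chat summary and token cost (effectively zero for local models).
print(chat_result.summary)
print(chat_result.cost)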
@@ -0,0 +1,21 @@
from autogen import UserProxyAgent, ConversableAgent

local_llm_config = {
    "config_list": [
        {
            "model": "TheBloke/Llama-2-7b-Chat-AWQ",  # Loaded with vLLM command
            "api_key": "NotRequired",  # Not needed
            "base_url": "http://192.168.0.115:8000/v1"  # Your vLLM URL
        }
    ],
    "cache_seed": None  # Turns off caching, useful for testing different models
}

# Create the agent that uses the LLM.
assistant = ConversableAgent("agent", llm_config=local_llm_config, system_message="")

# Create the agent that represents the user in the conversation.
user_proxy = UserProxyAgent("user", code_execution_config=False, system_message="")

# Let the assistant start the conversation. It will end when the user types 'exit'.
assistant.initiate_chat(user_proxy, message="How can I help you today?")

function_calling/function_calling_test.py (+4 -1)
@@ -19,7 +19,10 @@
 # "config_list": [{"model": model_name, "api_key": "NotRequired", "base_url": f"http://{network_address}:11434/v1"}],

 # LiteLLM (supports function calling but may have Ollama function calling bug)
-"config_list": [{"model": "litellmnotneeded", "api_key": "NotRequired", "base_url": f"http://{network_address}:8801"}],
+# "config_list": [{"model": "litellmnotneeded", "api_key": "NotRequired", "base_url": f"http://{network_address}:8801"}],
+
+# Gorilla OpenFunctions
+"config_list": [{"model": "gorilla-7b-hf-v1", "api_key": "EMPTY", "base_url": f"http://zanino.millennium.berkeley.edu:8000/v1"}],

 # Functionary (not tested)
 # "config_list": [{"model": "litellmnotneeded", "api_key": "NotRequired", "base_url": f"http://{network_address}:8000/v1"}],
