Skip to content

Commit 0cd1d3a

Browse files
committed
testbench to verify informational agent prompt with missing sections
1 parent 65845c7 commit 0cd1d3a

File tree

4 files changed

+127
-86
lines changed

4 files changed

+127
-86
lines changed

.gitignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,4 +135,5 @@ dmypy.json
135135

136136
# Synthetic data conversations
137137
src/agents/utils/synthetic_conversations/*.json
138-
src/agents/utils/synthetic_conversations/*.csv
138+
src/agents/utils/synthetic_conversations/*.csv
139+
src/agents/utils/synthetic_conversations/*.tsv

src/agents/informational_agent/informational_agent.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ class State(TypedDict):
3636
conversationalStyle: str
3737

3838
class InformationalAgent:
39-
def __init__(self):
39+
def __init__(self, informational_role_prompt: str = informational_role_prompt, conv_pref_prompt: str = conv_pref_prompt, update_conv_pref_prompt: str = update_conv_pref_prompt, summary_prompt: str = summary_prompt, update_summary_prompt: str = update_summary_prompt):
4040
llm = GoogleAILLMs()
4141
self.llm = llm.get_llm()
4242
summarisation_llm = OpenAILLMs()
@@ -181,7 +181,7 @@ def pretty_response_value(self, event: dict) -> str:
181181
return event["messages"][-1].content
182182

183183
agent = InformationalAgent()
184-
def invoke_informational_agent(query: str, conversation_history: list, summary: str, conversationalStyle: str, question_response_details: str, session_id: str) -> InvokeAgentResponseType:
184+
def invoke_informational_agent(query: str, conversation_history: list, summary: str, conversationalStyle: str, question_response_details: str, session_id: str, agent: InformationalAgent = agent) -> InvokeAgentResponseType:
185185
print(f'in invoke_informational_agent(), thread_id = {session_id}')
186186

187187
config = {"configurable": {"thread_id": session_id, "summary": summary, "conversational_style": conversationalStyle, "question_response_details": question_response_details}}

src/agents/informational_agent/informational_prompts.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -20,20 +20,18 @@
2020
Adaptability: Customize teaching approaches based on the student's learning preferences and evolving needs.
2121
Curiosity-Building: Inspire students to ask thoughtful questions, fostering a love for learning.
2222
Consistency: Reinforce concepts regularly to build lasting understanding.
23-
Conversation Flow:
24-
Frequently conclude interactions with a question to keep the dialogue active and gauge the student's comprehension and comfort with the material.
23+
No Unnecessary Praise: Avoid excessive praise to maintain an authentic and constructive learning environment.
24+
Conversation Flow: Frequently conclude interactions with a question to keep the dialogue active and gauge the student's comprehension and comfort with the material.
2525
Continuously adapt to the student's problem-solving style, preferred level of guidance, and feedback.
2626
2727
Example Conversation Style:
28-
29-
If the student asks, "How do I solve this equation?" respond with:
30-
"Let's start by identifying what you know. What operation do you think comes first?"
28+
If the student asks, "How do I solve this equation?" respond with: "Let's start by identifying what you know. What operation do you think comes first?".
3129
Follow up with guided hints or clarifications based on their response.
3230
3331
## Flexibility:
3432
Restrict your response's length to quickly resolve the student's query. However, adjust your approach dynamically, if the student seeks detailed guidance, prefers a hands-off approach, or demonstrates unique problem-solving strategies. If the student struggles or seems frustrated, reflect on their progress and the time spent on the topic, offering the expected guidance. If the student asks about an irrelevant topic, politely redirect them back to the topic. Do not end your responses with a concluding statement.
3533
36-
## Governance
34+
## Governance:
3735
You are a chatbot deployed in Lambda Feedback, an online self-study platform. You are discussing with students from Imperial College London."""
3836

3937
pref_guidelines = """**Guidelines:**

src/agents/utils/testbench_agents.py

Lines changed: 119 additions & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -1,98 +1,140 @@
1-
"""
2-
STEP 1: Read the USER INFO from the WEB client from a file
3-
"""
4-
51
import json
2+
import time
3+
import os
64
try:
75
from .parse_json_to_prompt import parse_json_to_prompt
86
from ..base_agent.base_agent import invoke_base_agent
9-
from ..informational_agent.informational_agent import invoke_informational_agent
7+
from ..informational_agent.informational_agent import InformationalAgent, invoke_informational_agent
108
from ..socratic_agent.socratic_agent import invoke_socratic_agent
9+
from ..informational_agent.informational_prompts import \
10+
informational_role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt
1111
except ImportError:
1212
from src.agents.utils.parse_json_to_prompt import parse_json_to_prompt
1313
from src.agents.base_agent.base_agent import invoke_base_agent
14-
from src.agents.informational_agent.informational_agent import invoke_informational_agent
14+
from src.agents.informational_agent.informational_agent import InformationalAgent, invoke_informational_agent
1515
from src.agents.socratic_agent.socratic_agent import invoke_socratic_agent
16+
from src.agents.informational_agent.informational_prompts import \
17+
informational_role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt
1618

1719
# File path for the input text
18-
path = "src/agents/utils/example_inputs/"
19-
input_file = path + "example_input_4.json"
20+
path = "src/agents/utils/"
21+
input_file = path + "example_inputs/" + "example_input_4.json"
2022

21-
# Step 1: Read the input file
23+
"""
24+
STEP 1: Read the USER INFO from the WEB client from a file
25+
"""
2226
with open(input_file, "r") as file:
2327
raw_text = file.read()
24-
25-
# Step 5: Parse into JSON
26-
try:
27-
parsed_json = json.loads(raw_text)
2828

29-
"""
30-
STEP 2: Extract the parameters from the JSON
31-
"""
29+
def testbench_agents(message, remove_index, agent_type = "informational", informational_role_prompt = informational_role_prompt):
30+
try:
31+
"""
32+
STEP 2: Parse the question information from the JSON file
33+
"""
34+
parsed_json = json.loads(raw_text)
35+
parsed_json["message"] = message
36+
parsed_json["params"]["conversation_history"][-1]["content"] = message
37+
38+
params = parsed_json["params"]
39+
40+
if "include_test_data" in params:
41+
include_test_data = params["include_test_data"]
42+
if "conversation_history" in params:
43+
conversation_history = params["conversation_history"]
44+
if "summary" in params:
45+
summary = params["summary"]
46+
if "conversational_style" in params:
47+
conversationalStyle = params["conversational_style"]
48+
if "question_response_details" in params:
49+
question_response_details = params["question_response_details"]
50+
question_submission_summary = question_response_details["questionSubmissionSummary"] if "questionSubmissionSummary" in question_response_details else []
51+
question_information = question_response_details["questionInformation"] if "questionInformation" in question_response_details else {}
52+
question_access_information = question_response_details["questionAccessInformation"] if "questionAccessInformation" in question_response_details else {}
53+
question_response_details_prompt = parse_json_to_prompt(
54+
question_submission_summary,
55+
question_information,
56+
question_access_information
57+
)
58+
# print("Question Response Details Prompt:", question_response_details_prompt, "\n\n")
59+
60+
if "agent_type" in params:
61+
agent_type = params["agent_type"]
62+
if "conversation_id" in params:
63+
conversation_id = params["conversation_id"]
64+
else:
65+
raise Exception("Internal Error: The conversation id is required in the parameters of the chat module.")
66+
67+
"""
68+
STEP 3: Call the LLM agent to get a response to the user's message
69+
"""
70+
if agent_type == "socratic":
71+
invoke = invoke_socratic_agent
72+
elif agent_type == "informational":
73+
"""
74+
STEP 4: Update the prompt to verify their performance
75+
"""
76+
role_prompt_components = informational_role_prompt.split("\n\n")
77+
main_prompt = role_prompt_components[0]
78+
teaching_methods = role_prompt_components[1].split("## Teaching Methods:\n")[1].split("\n")
79+
key_qualities = role_prompt_components[2].split("## Key Qualities:\n")[1].split("\n")
80+
example_style = role_prompt_components[3].split("Example Conversation Style:\n")[1].split("\n")
81+
flexibility_prompt = [item + '.' for item in role_prompt_components[4].split("## Flexibility:\n")[1].split(".") if item]
82+
governance_prompt = [item + '.' for item in role_prompt_components[-1].split("## Governance:\n")[1].split(".") if item]
83+
prompts = [main_prompt] + teaching_methods + key_qualities + example_style + flexibility_prompt + governance_prompt
84+
85+
# Remove one of the prompts to test the agent's performance
86+
prompt_missing = prompts[remove_index]
87+
print("Number of prompts:", len(prompts), ", current index:", remove_index, ", prompt removed:", prompt_missing)
88+
prompts.remove(prompt_missing)
89+
90+
updated_prompt = "\n\n".join(prompts)
91+
92+
agent = InformationalAgent(informational_role_prompt=updated_prompt, \
93+
conv_pref_prompt=conv_pref_prompt, \
94+
update_conv_pref_prompt=update_conv_pref_prompt, \
95+
summary_prompt=summary_prompt, \
96+
update_summary_prompt=update_summary_prompt)
97+
invoke = invoke_informational_agent
98+
else:
99+
raise Exception("Unknown Tutor Agent Type")
100+
101+
response = invoke(query=message, \
102+
conversation_history=conversation_history, \
103+
summary=summary, \
104+
conversationalStyle=conversationalStyle, \
105+
question_response_details=question_response_details_prompt, \
106+
session_id=conversation_id,
107+
agent=agent)
108+
109+
print(response)
110+
print("AI Response:", response['output'])
111+
return message, response, updated_prompt, prompt_missing
112+
113+
except json.JSONDecodeError as e:
114+
print("Error decoding JSON:", e)
115+
116+
117+
if __name__ == "__main__":
118+
file = path + "synthetic_conversations/" + "prompts_importance.tsv"
119+
# create the file if it doesn't exist
120+
if not os.path.exists(file):
121+
with open(file, "w") as f:
122+
f.write("message\t response\t prompt\t prompt_missing\n")
123+
32124
# NOTE: #### This is the testing message!! #####
33-
message = "What do you know about me?"
125+
message = "How do you tackle the worked solution for part c?"
34126
# NOTE: ########################################
35127

36-
# replace "mock" in the message and conversation history with the actual message
37-
parsed_json["message"] = message
38-
parsed_json["params"]["conversation_history"][-1]["content"] = message
39-
40-
params = parsed_json["params"]
41-
42-
if "include_test_data" in params:
43-
include_test_data = params["include_test_data"]
44-
if "conversation_history" in params:
45-
conversation_history = params["conversation_history"]
46-
if "summary" in params:
47-
summary = params["summary"]
48-
if "conversational_style" in params:
49-
conversationalStyle = params["conversational_style"]
50-
if "question_response_details" in params:
51-
question_response_details = params["question_response_details"]
52-
question_submission_summary = question_response_details["questionSubmissionSummary"] if "questionSubmissionSummary" in question_response_details else []
53-
question_information = question_response_details["questionInformation"] if "questionInformation" in question_response_details else {}
54-
question_access_information = question_response_details["questionAccessInformation"] if "questionAccessInformation" in question_response_details else {}
55-
question_response_details_prompt = parse_json_to_prompt(
56-
question_submission_summary,
57-
question_information,
58-
question_access_information
59-
)
60-
# print("Question Response Details Prompt:", question_response_details_prompt, "\n\n")
61-
62-
if "agent_type" in params:
63-
agent_type = params["agent_type"]
64-
if "conversation_id" in params:
65-
conversation_id = params["conversation_id"]
66-
else:
67-
raise Exception("Internal Error: The conversation id is required in the parameters of the chat module.")
68-
69-
"""
70-
STEP 3: Call the LLM agent to get a response to the user's message
71-
"""
72-
# NOTE: ### SET the agent type to use ###
73-
agent_type = "informational"
74-
# NOTE: #################################
75-
76-
if agent_type == "socratic":
77-
invoke = invoke_socratic_agent
78-
elif agent_type == "informational":
79-
invoke = invoke_informational_agent
80-
else:
81-
raise Exception("Unknown Tutor Agent Type")
82-
83-
response = invoke(query=message, \
84-
conversation_history=conversation_history, \
85-
summary=summary, \
86-
conversationalStyle=conversationalStyle, \
87-
question_response_details=question_response_details_prompt, \
88-
session_id=conversation_id)
89-
90-
print(response)
91-
print("AI Response:", response['output'])
92-
93-
94-
except json.JSONDecodeError as e:
95-
print("Error decoding JSON:", e)
128+
index_count = 23 # Number of sections in the informational agent prompt
129+
for i in range(0, index_count):
130+
if i == 16:
131+
time.sleep(60)
132+
message, response, prompt, prompt_missing = testbench_agents(message, remove_index=i)
96133

134+
with open(file, "a") as f:
135+
# append another line to the file
136+
if prompt_missing != " ":
137+
f.write(message + "\t" + ' '.join(response['output'].split('\n')) + "\t" + ' '.join(prompt.split('\n')) + "\t" +prompt_missing + "\n")
138+
print("File written successfully!")
97139

98140

0 commit comments

Comments
 (0)