-"""
- STEP 1: Read the USER INFO from the WEB client from a file
-"""
-
 import json
+import time
+import os
 try:
     from .parse_json_to_prompt import parse_json_to_prompt
     from ..base_agent.base_agent import invoke_base_agent
-    from ..informational_agent.informational_agent import invoke_informational_agent
+    from ..informational_agent.informational_agent import InformationalAgent, invoke_informational_agent
     from ..socratic_agent.socratic_agent import invoke_socratic_agent
+    from ..informational_agent.informational_prompts import \
+        informational_role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt
 except ImportError:
     from src.agents.utils.parse_json_to_prompt import parse_json_to_prompt
     from src.agents.base_agent.base_agent import invoke_base_agent
-    from src.agents.informational_agent.informational_agent import invoke_informational_agent
+    from src.agents.informational_agent.informational_agent import InformationalAgent, invoke_informational_agent
     from src.agents.socratic_agent.socratic_agent import invoke_socratic_agent
+    from src.agents.informational_agent.informational_prompts import \
+        informational_role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt
 
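+# NOTE: the try/except above lets this file run both as a package module
+# (relative imports) and as a standalone script from the repository root.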
 # File path for the input text
-path = "src/agents/utils/example_inputs/"
-input_file = path + "example_input_4.json"
+path = "src/agents/utils/"
+input_file = path + "example_inputs/" + "example_input_4.json"
 
-# Step 1: Read the input file
+"""
+ STEP 1: Read the USER INFO from the WEB client from a file
+"""
 with open(input_file, "r") as file:
     raw_text = file.read()
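+# raw_text holds the example web-client payload; testbench_agents() below re-parses it on each call.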
-
-# Step 5: Parse into JSON
-try:
-    parsed_json = json.loads(raw_text)
 
-    """
-    STEP 2: Extract the parameters from the JSON
-    """
+def testbench_agents(message, remove_index, agent_type="informational", informational_role_prompt=informational_role_prompt):
+    try:
+        """
+        STEP 2: Parse the question information from the JSON file
+        """
+        parsed_json = json.loads(raw_text)
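+        # Replace the mock message in the example JSON with the message under test: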
+        parsed_json["message"] = message
+        parsed_json["params"]["conversation_history"][-1]["content"] = message
+
+        params = parsed_json["params"]
+
+        if "include_test_data" in params:
+            include_test_data = params["include_test_data"]
+        if "conversation_history" in params:
+            conversation_history = params["conversation_history"]
+        if "summary" in params:
+            summary = params["summary"]
+        if "conversational_style" in params:
+            conversationalStyle = params["conversational_style"]
+        if "question_response_details" in params:
+            question_response_details = params["question_response_details"]
+            question_submission_summary = question_response_details.get("questionSubmissionSummary", [])
+            question_information = question_response_details.get("questionInformation", {})
+            question_access_information = question_response_details.get("questionAccessInformation", {})
+            question_response_details_prompt = parse_json_to_prompt(
+                question_submission_summary,
+                question_information,
+                question_access_information
+            )
+            # print("Question Response Details Prompt:", question_response_details_prompt, "\n\n")
+
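+        # NOTE: the variables above are only bound when their keys exist in the
+        # example JSON; the invoke call below assumes all of them are present.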
+        if "agent_type" in params:
+            # An agent type set in the JSON overrides the agent_type function argument.
+            agent_type = params["agent_type"]
+        if "conversation_id" in params:
+            conversation_id = params["conversation_id"]
+        else:
+            raise Exception("Internal Error: The conversation id is required in the parameters of the chat module.")
+
+        """
+        STEP 3: Call the LLM agent to get a response to the user's message
+        """
+        if agent_type == "socratic":
+            invoke = invoke_socratic_agent
+            # Give the shared invoke call below defined values: the socratic
+            # path builds no custom agent and does no prompt ablation.
+            agent, updated_prompt, prompt_missing = None, informational_role_prompt, ""
+        elif agent_type == "informational":
+            """
+            STEP 4: Rebuild the role prompt with one section removed to test that section's importance
+            """
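+            # Split the role prompt into individually removable pieces, assuming the
+            # blank-line-separated "## ..." section layout of informational_prompts.py.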
+            role_prompt_components = informational_role_prompt.split("\n\n")
+            main_prompt = role_prompt_components[0]
+            teaching_methods = role_prompt_components[1].split("## Teaching Methods:\n")[1].split("\n")
+            key_qualities = role_prompt_components[2].split("## Key Qualities:\n")[1].split("\n")
+            example_style = role_prompt_components[3].split("Example Conversation Style:\n")[1].split("\n")
+            flexibility_prompt = [item + '.' for item in role_prompt_components[4].split("## Flexibility:\n")[1].split(".") if item]
+            governance_prompt = [item + '.' for item in role_prompt_components[-1].split("## Governance:\n")[1].split(".") if item]
+            prompts = [main_prompt] + teaching_methods + key_qualities + example_style + flexibility_prompt + governance_prompt
+
+            # Remove one of the prompt sections to test the agent's performance without it
+            prompt_missing = prompts[remove_index]
+            print("Number of prompts:", len(prompts), ", current index:", remove_index, ", prompt removed:", prompt_missing)
+            prompts.pop(remove_index)  # pop by index, so a duplicate section can never be removed by mistake
+
+            updated_prompt = "\n\n".join(prompts)
+
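+            # Build a fresh agent whose role prompt omits the selected section;
+            # the remaining prompt components are passed through unchanged.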
+            agent = InformationalAgent(informational_role_prompt=updated_prompt,
+                                       conv_pref_prompt=conv_pref_prompt,
+                                       update_conv_pref_prompt=update_conv_pref_prompt,
+                                       summary_prompt=summary_prompt,
+                                       update_summary_prompt=update_summary_prompt)
+            invoke = invoke_informational_agent
+        else:
+            raise Exception("Unknown Tutor Agent Type")
+
+        response = invoke(query=message,
+                          conversation_history=conversation_history,
+                          summary=summary,
+                          conversationalStyle=conversationalStyle,
+                          question_response_details=question_response_details_prompt,
+                          session_id=conversation_id,
+                          agent=agent)
+
+        print(response)
+        print("AI Response:", response['output'])
+        return message, response, updated_prompt, prompt_missing
+
+    except json.JSONDecodeError as e:
+        print("Error decoding JSON:", e)
+        return message, None, "", " "  # " " is a sentinel: the caller skips logging this run
+
+
+if __name__ == "__main__":
+    file = path + "synthetic_conversations/" + "prompts_importance.tsv"
+    # create the file with a TSV header if it doesn't exist
+    if not os.path.exists(file):
+        with open(file, "w") as f:
+            f.write("message\tresponse\tprompt\tprompt_missing\n")
+
     # NOTE: #### This is the testing message!! #####
-    message = "What do you know about me?"
+    message = "How do you tackle the worked solution for part c?"
     # NOTE: ########################################
 
-    # replace "mock" in the message and conversation history with the actual message
-    parsed_json["message"] = message
-    parsed_json["params"]["conversation_history"][-1]["content"] = message
-
-    params = parsed_json["params"]
-
-    if "include_test_data" in params:
-        include_test_data = params["include_test_data"]
-    if "conversation_history" in params:
-        conversation_history = params["conversation_history"]
-    if "summary" in params:
-        summary = params["summary"]
-    if "conversational_style" in params:
-        conversationalStyle = params["conversational_style"]
-    if "question_response_details" in params:
-        question_response_details = params["question_response_details"]
-        question_submission_summary = question_response_details["questionSubmissionSummary"] if "questionSubmissionSummary" in question_response_details else []
-        question_information = question_response_details["questionInformation"] if "questionInformation" in question_response_details else {}
-        question_access_information = question_response_details["questionAccessInformation"] if "questionAccessInformation" in question_response_details else {}
-        question_response_details_prompt = parse_json_to_prompt(
-            question_submission_summary,
-            question_information,
-            question_access_information
-        )
-        # print("Question Response Details Prompt:", question_response_details_prompt, "\n\n")
-
-    if "agent_type" in params:
-        agent_type = params["agent_type"]
-    if "conversation_id" in params:
-        conversation_id = params["conversation_id"]
-    else:
-        raise Exception("Internal Error: The conversation id is required in the parameters of the chat module.")
-
-    """
-    STEP 3: Call the LLM agent to get a response to the user's message
-    """
-    # NOTE: ### SET the agent type to use ###
-    agent_type = "informational"
-    # NOTE: #################################
-
-    if agent_type == "socratic":
-        invoke = invoke_socratic_agent
-    elif agent_type == "informational":
-        invoke = invoke_informational_agent
-    else:
-        raise Exception("Unknown Tutor Agent Type")
-
-    response = invoke(query=message, \
-                      conversation_history=conversation_history, \
-                      summary=summary, \
-                      conversationalStyle=conversationalStyle, \
-                      question_response_details=question_response_details_prompt, \
-                      session_id=conversation_id)
-
-    print(response)
-    print("AI Response:", response['output'])
-
-
-except json.JSONDecodeError as e:
-    print("Error decoding JSON:", e)
+    index_count = 23  # Number of sections in the informational agent prompt
+    for i in range(0, index_count):
+        if i == 16:
+            # Pause partway through, presumably to stay under the LLM provider's rate limits.
+            time.sleep(60)
+        message, response, prompt, prompt_missing = testbench_agents(message, remove_index=i)
 
+        with open(file, "a") as f:
+            # append one tab-separated row, flattening newlines so every field stays on a single line
+            if prompt_missing != " ":
+                f.write(message + "\t" + ' '.join(response['output'].split('\n')) + "\t" + ' '.join(prompt.split('\n')) + "\t" + prompt_missing + "\n")
+                print("File written successfully!")