Skip to content

Commit 4c81531

Browse files
authored
Prompt minor changes (#20)
* update: using chat function workflows * fix: add config.json boilerplate * fix: test chat function name * revert to non workflows until backend ready * minor updates readme * debugging input * fix: prompt parsing * introduce deepseek api option * update conversationalStyle for agent * minor prompt updates * minor debugging * fix: system prompt location summary * fix system message location
1 parent 05adb72 commit 4c81531

File tree

8 files changed

+113
-63
lines changed

8 files changed

+113
-63
lines changed

index.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ def handler(event, context):
99
Lambda handler function
1010
"""
1111
# Log the input event for debugging purposes
12-
# print("Received event:", json.dumps(event, indent=2))
12+
print("Received event:", json.dumps(event, indent=2))
1313

1414
if "message" not in event:
1515
return {

src/agents/base_agent/base_agent.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -64,15 +64,15 @@ def call_model(self, state: State, config: RunnableConfig) -> str:
6464
# Adding external student progress and question context details from data queries
6565
question_response_details = config["configurable"].get("question_response_details", "")
6666
if question_response_details:
67-
system_message += f"## Known Question Materials: {question_response_details} \n\n"
67+
system_message += f"\n\n ## Known Question Materials: {question_response_details} \n\n"
6868

6969
# Adding summary and conversational style to the system message
7070
summary = state.get("summary", "")
7171
conversationalStyle = state.get("conversationalStyle", "")
7272
if summary:
7373
system_message += summary_system_prompt.format(summary=summary)
7474
if conversationalStyle:
75-
system_message += f"## Known conversational style and preferences of the student for this conversation: {conversationalStyle}. \n\nYour answer must be in line with this conversational style."
75+
system_message += f"\n\n ## Known conversational style and preferences of the student for this conversation: {conversationalStyle}. \n\nYour answer must be in line with this conversational style."
7676

7777
messages = [SystemMessage(content=system_message)] + state['messages']
7878

@@ -120,12 +120,12 @@ def summarize_conversation(self, state: State, config: RunnableConfig) -> dict:
120120
conversationalStyle_message = self.conversation_preference_prompt
121121

122122
# STEP 1: Summarize the conversation
123-
messages = state["messages"][:-1] + [SystemMessage(content=summary_message)]
123+
messages = [SystemMessage(content=summary_message)] + state["messages"][:-1]
124124
valid_messages = self.check_for_valid_messages(messages)
125125
summary_response = self.summarisation_llm.invoke(valid_messages)
126126

127127
# STEP 2: Analyze the conversational style
128-
messages = state["messages"][:-1] + [SystemMessage(content=conversationalStyle_message)]
128+
messages = [SystemMessage(content=conversationalStyle_message)] + state["messages"][:-1]
129129
valid_messages = self.check_for_valid_messages(messages)
130130
conversationalStyle_response = self.summarisation_llm.invoke(valid_messages)
131131

@@ -184,7 +184,7 @@ def invoke_base_agent(query: str, conversation_history: list, summary: str, conv
184184
Call an agent that has no conversation memory and expects to receive all past messages in the params and the latest human request in the query.
185185
If conversation history longer than X, the agent will summarize the conversation and will provide a conversational style analysis.
186186
"""
187-
print(f'in invoke_base_agent(), query = {query}, thread_id = {session_id}')
187+
print(f'in invoke_base_agent(), thread_id = {session_id}')
188188

189189
config = {"configurable": {"thread_id": session_id, "summary": summary, "conversational_style": conversationalStyle, "question_response_details": question_response_details}}
190190
response_events = agent.app.invoke({"messages": conversation_history, "summary": summary, "conversational_style": conversationalStyle}, config=config, stream_mode="values") #updates

src/agents/google_learnLM_agent/google_learnLM_agent.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -68,15 +68,15 @@ def call_model(self, state: State, config: RunnableConfig) -> str:
6868
# Adding external student progress and question context details from data queries
6969
question_response_details = config["configurable"].get("question_response_details", "")
7070
if question_response_details:
71-
system_message += f"## Known Question Materials: {question_response_details} \n\n"
71+
system_message += f"\n\n ## Known Question Materials: {question_response_details} \n\n"
7272

7373
# Adding summary and conversational style to the system message
7474
summary = state.get("summary", "")
7575
conversationalStyle = state.get("conversationalStyle", "")
7676
if summary:
7777
system_message += summary_system_prompt.format(summary=summary)
7878
if conversationalStyle:
79-
system_message += f"## Known conversational style and preferences of the student for this conversation: {conversationalStyle}. \n\nYour answer must be in line with this conversational style."
79+
system_message += f"\n\n ## Known conversational style and preferences of the student for this conversation: {conversationalStyle}. \n\nYour answer must be in line with this conversational style."
8080

8181
messages = [SystemMessage(content=system_message)] + state['messages']
8282

@@ -124,12 +124,12 @@ def summarize_conversation(self, state: State, config: RunnableConfig) -> dict:
124124
conversationalStyle_message = self.conversation_preference_prompt
125125

126126
# STEP 1: Summarize the conversation
127-
messages = state["messages"][:-1] + [SystemMessage(content=summary_message)]
127+
messages = [SystemMessage(content=summary_message)] + state["messages"][:-1]
128128
valid_messages = self.check_for_valid_messages(messages)
129129
summary_response = self.summarisation_llm.invoke(valid_messages)
130130

131131
# STEP 2: Analyze the conversational style
132-
messages = state["messages"][:-1] + [SystemMessage(content=conversationalStyle_message)]
132+
messages = [SystemMessage(content=conversationalStyle_message)] + state["messages"][:-1]
133133
valid_messages = self.check_for_valid_messages(messages)
134134
conversationalStyle_response = self.summarisation_llm.invoke(valid_messages)
135135

src/agents/informational_agent/informational_agent.py

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -66,15 +66,15 @@ def call_model(self, state: State, config: RunnableConfig) -> str:
6666
# Adding external student progress and question context details from data queries
6767
question_response_details = config["configurable"].get("question_response_details", "")
6868
if question_response_details:
69-
system_message += f"## Known Learning Materials: {question_response_details} \n\n"
69+
system_message += f"\n\n ## Known Learning Materials: {question_response_details} \n\n"
7070

7171
# Adding summary and conversational style to the system message
7272
summary = state.get("summary", "")
7373
conversationalStyle = state.get("conversationalStyle", "")
7474
if summary:
7575
system_message += summary_system_prompt.format(summary=summary)
7676
# if conversationalStyle:
77-
# system_message += f"## Known conversational style and preferences of the student for this conversation: {conversationalStyle}. \n\nYour answer must be in line with this conversational style."
77+
# system_message += f"\n\n ## Known conversational style and preferences of the student for this conversation: {conversationalStyle}. \n\nYour answer must be in line with this conversational style."
7878

7979
messages = [SystemMessage(content=system_message)] + state['messages']
8080

@@ -101,7 +101,7 @@ def summarize_conversation(self, state: State, config: RunnableConfig) -> dict:
101101

102102
summary = state.get("summary", "")
103103
previous_summary = config["configurable"].get("summary", "")
104-
# previous_conversationalStyle = config["configurable"].get("conversational_style", "")
104+
previous_conversationalStyle = config["configurable"].get("conversational_style", "")
105105
if previous_summary:
106106
summary = previous_summary
107107

@@ -113,29 +113,29 @@ def summarize_conversation(self, state: State, config: RunnableConfig) -> dict:
113113
else:
114114
summary_message = self.summary_prompt
115115

116-
# if previous_conversationalStyle:
117-
# conversationalStyle_message = (
118-
# f"This is the previous conversational style of the student for this conversation: {previous_conversationalStyle}\n\n" +
119-
# self.update_conversation_preference_prompt
120-
# )
121-
# else:
122-
# conversationalStyle_message = self.conversation_preference_prompt
116+
if previous_conversationalStyle:
117+
conversationalStyle_message = (
118+
f"This is the previous conversational style of the student for this conversation: {previous_conversationalStyle}\n\n" +
119+
self.update_conversation_preference_prompt
120+
)
121+
else:
122+
conversationalStyle_message = self.conversation_preference_prompt
123123

124124
# STEP 1: Summarize the conversation
125-
messages = state["messages"][:-1] + [SystemMessage(content=summary_message)]
125+
messages = [SystemMessage(content=summary_message)] + state["messages"][:-1]
126126
valid_messages = self.check_for_valid_messages(messages)
127127
summary_response = self.summarisation_llm.invoke(valid_messages)
128128

129129
# STEP 2: Analyze the conversational style
130-
# messages = state["messages"][:-1] + [SystemMessage(content=conversationalStyle_message)]
131-
# valid_messages = self.check_for_valid_messages(messages)
132-
# conversationalStyle_response = self.summarisation_llm.invoke(valid_messages)
130+
messages = [SystemMessage(content=conversationalStyle_message)] + state["messages"][:-1]
131+
valid_messages = self.check_for_valid_messages(messages)
132+
conversationalStyle_response = self.summarisation_llm.invoke(valid_messages)
133133

134134
# Delete messages that are no longer wanted, except the last ones
135-
delete_messages: list[AllMessageTypes] = [RemoveMessage(id=m.id) for m in state["messages"][:-5]]
135+
delete_messages: list[AllMessageTypes] = [RemoveMessage(id=m.id) for m in state["messages"][:-3]]
136136

137-
# return {"summary": summary_response.content, "conversationalStyle": conversationalStyle_response.content, "messages": delete_messages}
138-
return {"summary": summary_response.content, "messages": delete_messages}
137+
return {"summary": summary_response.content, "conversationalStyle": conversationalStyle_response.content, "messages": delete_messages}
138+
# return {"summary": summary_response.content, "messages": delete_messages}
139139

140140
def should_summarize(self, state: State) -> str:
141141
"""
@@ -183,7 +183,7 @@ def pretty_response_value(self, event: dict) -> str:
183183

184184
agent = InformationalAgent()
185185
def invoke_informational_agent(query: str, conversation_history: list, summary: str, conversationalStyle: str, question_response_details: str, session_id: str) -> InvokeAgentResponseType:
186-
print(f'in invoke_informational_agent(), query = {query}, thread_id = {session_id}')
186+
print(f'in invoke_informational_agent(), thread_id = {session_id}')
187187

188188
config = {"configurable": {"thread_id": session_id, "summary": summary, "conversational_style": conversationalStyle, "question_response_details": question_response_details}}
189189
response_events = agent.app.invoke({"messages": conversation_history, "summary": summary, "conversational_style": conversationalStyle}, config=config, stream_mode="values") #updates

0 commit comments

Comments (0)