@@ -29,7 +29,7 @@ def react(llm_app,
         model_name=react_configuration.model_name,
     )
 
-    llm_response = llm_app(user_query, reprompted_flag=False)
+    llm_response = llm_app(user_query, user_instructions, reprompted_flag=False)
 
     ## Decorating the context_extractor function with AIMon's "detect"
     context_extractor = detect(context_extractor)
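
The `detect` object applied above is AIMon's detection decorator; the `model_name=...` tail visible in this hunk is likely part of its construction. A minimal sketch follows, assuming the aimon SDK's `Detect` class; every argument shown is an assumption for illustration, not part of this commit:

    import os
    from aimon import Detect

    # Enable the two detectors the react loop consumes below:
    # hallucination scoring and instruction adherence checks.
    detect = Detect(
        values_returned=['context', 'user_query', 'instructions', 'generated_text'],
        api_key=os.getenv('AIMON_API_KEY'),
        config={'hallucination': {'detector_name': 'default'},
                'instruction_adherence': {'detector_name': 'default'}},
        model_name=react_configuration.model_name,
    )

Once decorated, context_extractor returns its original values with AIMon's detection result appended, which is why the loop in the next hunk unpacks an extra aimon_response element.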
@@ -38,25 +38,33 @@ def react(llm_app,
 
     for _ in range(react_configuration.max_attempts):
 
-        if aimon_response.detect_response.hallucination['score'] > react_configuration.hallucination_threshold:
-            llm_response = llm_app(user_query, reprompted_flag=True)
+        failed_instructions = []
+        ## Loop to collect the instructions that were not adhered to
+        for x in aimon_response.detect_response.instruction_adherence['results']:
+            if x['adherence'] == False:
+                failed_instructions.append(x['instruction'])
+
+        ## Reprompt if the hallucination score exceeds the threshold or any supplied instruction was not followed
+        if react_configuration.hallucination_threshold > 0 and \
+           (aimon_response.detect_response.hallucination['score'] > react_configuration.hallucination_threshold or len(failed_instructions) > 0):
+
+            llm_response = llm_app(user_query, user_instructions, reprompted_flag=True)
+
         _, _, _, query_result, aimon_response = context_extractor(user_query, user_instructions, llm_response)
 
     return query_result
 
-    ## To do:
-    ## Add instruction adherence logic in the next iteration
 
     ## llm_app is a function that has access to both a conservative and a creative LLM
     ## returns the LLM's response to the user's query
 
     ## Template for llm_app function
-    # def llm_app(user_query, reprompted_flag=False):
-    #     creative_llm: function
-    #     conservative_llm: function
+    # def llm_app(user_query, user_instructions, reprompted_flag=False):
+    #     from aimon_llamaindex import get_response
 
     #     if reprompted_flag==False:
-    #         return creative_llm.query(user_query)
+    #         return get_response(user_query, retriever, llm_creative)
 
     #     else:
-    #         return conservative_llm.query(user_query)
+    #         llm_conservative.system_prompt += user_instructions
+    #         return get_response(user_query, retriever, llm_conservative)
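
A concrete version of the llm_app template above might look like the sketch below. Only `get_response` and its argument order come from the template itself; the LlamaIndex model setup and the `retriever` object are assumptions for illustration:

    from aimon_llamaindex import get_response
    from llama_index.llms.openai import OpenAI

    # Hypothetical LLM pair: a creative model for the first attempt and a
    # conservative, low-temperature model for AIMon-triggered retries.
    llm_creative = OpenAI(model='gpt-4o', temperature=0.9)
    llm_conservative = OpenAI(model='gpt-4o', temperature=0.1)

    # retriever is assumed to be built elsewhere, e.g. index.as_retriever()

    def llm_app(user_query, user_instructions, reprompted_flag=False):
        if not reprompted_flag:
            return get_response(user_query, retriever, llm_creative)
        # On a retry, fold the user's instructions into the conservative
        # model's system prompt before answering again.
        llm_conservative.system_prompt = (llm_conservative.system_prompt or '') + user_instructions
        return get_response(user_query, retriever, llm_conservative)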
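For reference, the retry gate in the second hunk assumes a detection payload shaped roughly like this; the keys come from the code above, but the values are made up for illustration:

    aimon_response.detect_response.hallucination
    # e.g. {'score': 0.62, ...}

    aimon_response.detect_response.instruction_adherence
    # e.g. {'results': [{'instruction': 'Answer in one paragraph.',
    #                    'adherence': False, ...}, ...]}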