@@ -60,35 +60,37 @@ def react(self, user_query, user_instructions,):
 
         detect_response = self.client.inference.detect(body=[aimon_payload])
 
-        for _ in range(self.react_configuration.max_attempts):
+        return detect_response
 
-            failed_instructions = []
-            ## Loop to check for failed instructions
-            for x in detect_response.instruction_adherence['results']:
-                if x['adherence'] == False:
-                    failed_instructions.append(x['instruction'])
+        # for _ in range(self.react_configuration.max_attempts):
 
-            hallucination_score = detect_response.hallucination['score']
+        #     failed_instructions = []
+        #     ## Loop to check for failed instructions
+        #     for x in detect_response.instruction_adherence['results']:
+        #         if x['adherence'] == False:
+        #             failed_instructions.append(x['instruction'])
 
-            ## Check whether the hallucination score is greater than the required threshold OR if any of the supplied instructions are not complied with
-            if self.react_configuration.hallucination_threshold > 0 and \
-                (hallucination_score > self.react_configuration.hallucination_threshold or len(failed_instructions)>0):
+        #     hallucination_score = detect_response.hallucination['score']
+
+        #     ## Check whether the hallucination score is greater than the required threshold OR if any of the supplied instructions are not complied with
+        #     if self.react_configuration.hallucination_threshold > 0 and \
+        #         (hallucination_score > self.react_configuration.hallucination_threshold or len(failed_instructions)>0):
 
-                llm_response = self.llm_app(user_query, user_instructions, reprompted_flag=True, hallucination_score=hallucination_score)
+        #         llm_response = self.llm_app(user_query, user_instructions, reprompted_flag=True, hallucination_score=hallucination_score)
 
-                context = self.context_extractor(user_query, user_instructions, llm_response)
+        #         context = self.context_extractor(user_query, user_instructions, llm_response)
 
-                ## Generated text for LLM Response, if the user employs the LlamaIndex framework
-                if llm_response.response or self.react_configuration.framework=="llamaindex":
-                    generated_text = llm_response.response
-                else:
-                    generated_text = llm_response
+        #         ## Generated text for LLM Response, if the user employs the LlamaIndex framework
+        #         if llm_response.response or self.react_configuration.framework=="llamaindex":
+        #             generated_text = llm_response.response
+        #         else:
+        #             generated_text = llm_response
 
-                new_aimon_payload = self.create_payload(context, user_query, user_instructions, generated_text)
+        #         new_aimon_payload = self.create_payload(context, user_query, user_instructions, generated_text)
 
-                detect_response = self.client.inference.detect(body=[new_aimon_payload])
+        #         detect_response = self.client.inference.detect(body=[new_aimon_payload])
 
-                if hallucination_score > self.react_configuration.hallucination_threshold:
-                    return f"The generated LLM response, even after {self.react_configuration.max_attempts} attempts of ReAct is still hallucinated. The response: {generated_text}"
+        #     if hallucination_score > self.react_configuration.hallucination_threshold:
+        #         return f"The generated LLM response, even after {self.react_configuration.max_attempts} attempts of ReAct is still hallucinated. The response: {generated_text}"
 
-        return generated_text
+        # return generated_text
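After this change, react() no longer loops and re-prompts; it returns the raw AIMon detect response, so the caller reads the detection fields itself. A minimal caller sketch under that assumption — the reactor instance, the example query, and the instruction string below are hypothetical, while the field names mirror those used in the commented-out loop above:

# Hypothetical caller; `reactor` is assumed to be an instance of the class
# whose react() method is patched in this commit.
detect_response = reactor.react(
    user_query="What is the capital of France?",         # hypothetical query
    user_instructions=["Answer in a single sentence."],  # hypothetical instruction
)

# The caller now inspects the detection results directly; these attribute and
# key names come from the loop that this commit comments out.
hallucination_score = detect_response.hallucination['score']
failed_instructions = [
    r['instruction']
    for r in detect_response.instruction_adherence['results']
    if not r['adherence']
]
print(hallucination_score, failed_instructions)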