Commit ff0ab6f

detect response
1 parent f2049d0 commit ff0ab6f

File tree: 1 file changed (+24 −22 lines)


aimon/extensions/react.py

Lines changed: 24 additions & 22 deletions
@@ -60,35 +60,37 @@ def react(self, user_query, user_instructions,):
 
         detect_response = self.client.inference.detect(body=[aimon_payload])
 
-        for _ in range(self.react_configuration.max_attempts):
+        return detect_response
 
-            failed_instructions = []
-            ## Loop to check for failed instructions
-            for x in detect_response.instruction_adherence['results']:
-                if x['adherence'] == False:
-                    failed_instructions.append(x['instruction'])
+        # for _ in range(self.react_configuration.max_attempts):
 
-            hallucination_score = detect_response.hallucination['score']
+            # failed_instructions = []
+            # ## Loop to check for failed instructions
+            # for x in detect_response.instruction_adherence['results']:
+            #     if x['adherence'] == False:
+            #         failed_instructions.append(x['instruction'])
 
-            ## Check whether the hallucination score is greater than the required threshold OR if any of the supplied instructions are not complied with
-            if self.react_configuration.hallucination_threshold > 0 and \
-                (hallucination_score > self.react_configuration.hallucination_threshold or len(failed_instructions)>0):
+            # hallucination_score = detect_response.hallucination['score']
+
+            # ## Check whether the hallucination score is greater than the required threshold OR if any of the supplied instructions are not complied with
+            # if self.react_configuration.hallucination_threshold > 0 and \
+            #     (hallucination_score > self.react_configuration.hallucination_threshold or len(failed_instructions)>0):
 
-                llm_response = self.llm_app(user_query, user_instructions, reprompted_flag=True, hallucination_score=hallucination_score)
+            # llm_response = self.llm_app(user_query, user_instructions, reprompted_flag=True, hallucination_score=hallucination_score)
 
-                context = self.context_extractor(user_query, user_instructions, llm_response)
+            # context = self.context_extractor(user_query, user_instructions, llm_response)
 
-                ## Generated text for LLM Response, if the user employs the LlamaIndex framework
-                if llm_response.response or self.react_configuration.framework=="llamaindex":
-                    generated_text = llm_response.response
-                else:
-                    generated_text = llm_response
+            # ## Generated text for LLM Response, if the user employs the LlamaIndex framework
+            # if llm_response.response or self.react_configuration.framework=="llamaindex":
+            #     generated_text = llm_response.response
+            # else:
+            #     generated_text = llm_response
 
-                new_aimon_payload = self.create_payload(context, user_query, user_instructions, generated_text)
+            # new_aimon_payload = self.create_payload(context, user_query, user_instructions, generated_text)
 
-                detect_response = self.client.inference.detect(body=[new_aimon_payload])
+            # detect_response = self.client.inference.detect(body=[new_aimon_payload])
 
-                if hallucination_score > self.react_configuration.hallucination_threshold:
-                    return f"The generated LLM response, even after {self.react_configuration.max_attempts} attempts of ReAct is still hallucinated. The response: {generated_text}"
+            # if hallucination_score > self.react_configuration.hallucination_threshold:
+            #     return f"The generated LLM response, even after {self.react_configuration.max_attempts} attempts of ReAct is still hallucinated. The response: {generated_text}"
 
-            return generated_text
+            # return generated_text
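
Rough illustration of the effect of this change: react() no longer loops, re-prompts, or returns generated text; it returns the raw detect() response, so any threshold or instruction-adherence handling moves to the caller. The sketch below is hypothetical, not part of this commit; it assumes a React instance named reactor with the same react_configuration, llm_app, and response field layout (hallucination['score'], instruction_adherence['results']) shown in the commented-out code above.

    def query_with_check(reactor, user_query, user_instructions):
        # After this commit, react() returns the raw detect() response instead of
        # looping and re-prompting internally.
        detect_response = reactor.react(user_query, user_instructions)

        # Field layout taken from the commented-out loop in this diff.
        hallucination_score = detect_response.hallucination['score']
        failed_instructions = [
            r['instruction']
            for r in detect_response.instruction_adherence['results']
            if not r['adherence']
        ]

        if (hallucination_score > reactor.react_configuration.hallucination_threshold
                or failed_instructions):
            # Re-prompting is now the caller's responsibility; this mirrors the
            # llm_app call from the old loop (hypothetical handling).
            return reactor.llm_app(user_query, user_instructions,
                                   reprompted_flag=True,
                                   hallucination_score=hallucination_score)
        return detect_response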
