
Commit

mind id
femto committed Feb 17, 2025
1 parent 86b8f3b commit 26aca07
Showing 4 changed files with 54 additions and 12 deletions.
29 changes: 28 additions & 1 deletion examples/smart_minion/aime/evalute_aime.py
@@ -219,7 +219,34 @@ async def solve_question(item):
         answer, score, *_ = await brain.step(
             query=item["problem"],
             mind_id="left_mind",  # deepseek r1 skips choose mind
-
+            system_prompt="""You are DeepSeek-R1, an AI assistant created exclusively by the Chinese Company DeepSeek. You'll provide helpful, harmless, and detailed responses to all user inquiries. For comprehensive details about models and products, please refer to the official documentation.
+
+Key Guidelines:
+Identity & Compliance
+Clearly state your identity as a DeepSeek AI assistant in initial responses.
+Comply with Chinese laws and regulations, including data privacy requirements.
+
+Capability Scope
+Handle both Chinese and English queries effectively
+Acknowledge limitations for real-time information post knowledge cutoff (2023-12)
+Provide technical explanations for AI-related questions when appropriate
+
+Response Quality
+Give comprehensive, logically structured answers
+Use markdown formatting for clear information organization
+Admit uncertainties for ambiguous queries
+
+Ethical Operation
+Strictly refuse requests involving illegal activities, violence, or explicit content
+Maintain political neutrality according to company guidelines
+Protect user privacy and avoid data collection
+
+Specialized Processing
+Use <think>...</think> tags for internal reasoning before responding
+Employ XML-like tags for structured output when required
+
+Knowledge cutoff: {{current_date}}
+""",
             execution_config=load_execution_config(ensemble_logic_path),
         )
     return answer
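A note on this hunk: passing `mind_id="left_mind"` pins the mind up front, so `Brain.step` never consults `choose_mind` (see the `mind_id = input.mind_id or await self.choose_mind(input)` line in brain.py below). A minimal sketch of the same call pattern outside the benchmark, assuming a default-constructed `Brain` and a hypothetical query; the tuple unpacking mirrors the call above:

```python
import asyncio

from minion.main.brain import Brain


async def main():
    # Assumes Brain() works with defaults; the benchmark passes more config.
    brain = Brain()

    answer, score, *_ = await brain.step(
        query="If x + 2 = 5, what is x?",  # hypothetical query
        mind_id="left_mind",               # pins the mind, skipping choose_mind
        system_prompt="You are a concise math assistant.",
    )
    print(answer)


asyncio.run(main())
```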
25 changes: 18 additions & 7 deletions minion/actions/lmp_action_node.py
@@ -29,14 +29,30 @@ def ell_call(self, ret):
         """You are a helpful assistant."""
         return ret
 
-    async def execute(self, messages: Union[str, Message, List[Message]], response_format: Optional[Union[Type[BaseModel], dict]] = None, output_raw_parser=None, format="json", **kwargs) -> Any:
+    async def execute(self, messages: Union[str, Message, List[Message]], response_format: Optional[Union[Type[BaseModel], dict]] = None, output_raw_parser=None, format="json", system_prompt: Optional[str] = None, **kwargs) -> Any:
         # Add input_parser handling
         if self.input_parser:
             messages = self.input_parser(messages)
 
+        # Convert string/single message to list
+        if isinstance(messages, str):
+            messages = [Message(role="user", content=messages)]
+        elif isinstance(messages, Message):
+            messages = [messages]
+
+        # Add system prompt with priority:
+        # 1. Explicit system message in messages list
+        # 2. system_prompt parameter
+        # 3. input.system_prompt
+        if not any(msg.role == "system" for msg in messages):
+            if system_prompt is not None:
+                messages.insert(0, Message(role="system", content=system_prompt))
+            elif hasattr(self, 'input') and self.input and self.input.system_prompt:
+                messages.insert(0, Message(role="system", content=self.input.system_prompt))
+
         # Get configuration from llm.config
         api_params = {
-            "temperature": self.llm.config.temperature + random.random() * 0.01,  # add random to avoid prompt caching
+            "temperature": self.llm.config.temperature,  # + random.random() * 0.01  # add random to avoid prompt caching
            "model": self.llm.config.model,
         }
 
@@ -83,11 +99,6 @@ async def execute(self, messages: Union[str, Message, List[Message]], response_f
             )
             api_params['response_format'] = { "type": "text" }
 
-        if isinstance(messages, str):
-            messages = [Message(role="user", content=messages)]
-        elif isinstance(messages, Message):
-            messages = [messages]
-
         messages.append(Message(role="user", content=prompt))
 
         response = await super().execute(messages, **api_params)
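The priority order implemented above determines where the system message comes from: an explicit system message already in the list wins, then the `system_prompt` argument, then `self.input.system_prompt`. (The same diff also drops the small random temperature jitter that was there to defeat prompt caching, restoring the configured temperature verbatim.) A standalone sketch of the resolution order, using a simplified stand-in for the repo's `Message` type:

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Message:
    """Simplified stand-in for minion's Message type."""
    role: str
    content: str


def resolve_system_prompt(messages: List[Message],
                          system_prompt: Optional[str],
                          input_system_prompt: Optional[str]) -> List[Message]:
    # 1. An explicit system message already in the list wins outright.
    if any(msg.role == "system" for msg in messages):
        return messages
    # 2. Otherwise the system_prompt parameter takes effect.
    if system_prompt is not None:
        return [Message("system", system_prompt)] + messages
    # 3. Finally, fall back to input.system_prompt, if set.
    if input_system_prompt:
        return [Message("system", input_system_prompt)] + messages
    return messages


msgs = resolve_system_prompt([Message("user", "hi")], "Be brief.", None)
assert msgs[0] == Message("system", "Be brief.")
```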
10 changes: 7 additions & 3 deletions minion/main/brain.py
@@ -119,7 +119,6 @@ def __init__(
         self.python_env = python_env or PythonEnv(image_name, verbose=False, is_agent=True)
 
         self.stats_storer = stats_storer
-        self.lmp_action_node = LmpActionNode(llm=self.llm)
 
     def add_mind(self, mind):
         self.minds[mind.id] = mind
@@ -135,10 +134,14 @@ def process_image_input(self, input):
             raise ValueError("input.images should be either a string or a list of strings/images")
         return input.images
 
-    async def step(self, input=None, query="", query_type="", **kwargs):
+    async def step(self, input=None, query="", query_type="", system_prompt: str = None, **kwargs):
         input = input or Input(query=query, query_type=query_type, query_time=datetime.utcnow(), **kwargs)
         input.query_id = input.query_id or uuid.uuid4()
         input.images = self.process_image_input(input)  # normalize image format to base64
+
+        # Set system prompt if provided
+        if system_prompt is not None:
+            input.system_prompt = system_prompt
 
         mind_id = input.mind_id or await self.choose_mind(input)
         if mind_id == "left_mind":
@@ -172,7 +175,8 @@ async def choose_mind(self, input):
         filled_template = mind_template.render(minds=self.minds.values(), input=input)
 
         try:
-            result = await self.lmp_action_node.execute_answer(filled_template)
+            lmp_action_node = LmpActionNode(llm=self.llm)
+            result = await lmp_action_node.execute_answer(filled_template)
 
             # Ensure the result is a valid mind ID
             if result not in self.minds:
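Two changes to note here: `step` now stores `system_prompt` on the `Input`, where worker.py (next file) picks it up, and `choose_mind` builds its `LmpActionNode` per call instead of caching one on the brain in `__init__`. A schematic sketch of the propagation path, using simplified stand-ins for `Input` and the worker:

```python
from typing import Optional, Tuple


class Input:
    """Simplified stand-in for minion's Input."""
    def __init__(self, query: str):
        self.query = query
        self.system_prompt: Optional[str] = None


def step(query: str, system_prompt: Optional[str] = None) -> Input:
    input = Input(query)
    if system_prompt is not None:
        input.system_prompt = system_prompt  # brain.py stores it on the Input
    return input


def worker_execute(input: Input) -> Tuple[str, Optional[str]]:
    # worker.py then forwards both to the LLM call:
    #   node.execute(self.input.query, system_prompt=self.input.system_prompt)
    return input.query, input.system_prompt


inp = step("hello", system_prompt="Be brief.")
assert worker_execute(inp) == ("hello", "Be brief.")
```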
2 changes: 1 addition & 1 deletion minion/main/worker.py
@@ -79,7 +79,7 @@ def __init__(self, **kwargs):
 
     async def execute(self):
         node = LmpActionNode(self.brain.llm)
-        response = await node.execute(self.input.query)
+        response = await node.execute(self.input.query, system_prompt=self.input.system_prompt)
 
         self.answer_raw = self.input.answer_raw = response
         self.answer = self.input.answer = response
