diff --git a/examples/smart_minion/aime/evalute_aime.py b/examples/smart_minion/aime/evalute_aime.py
index 4cff8a09..98fd26b2 100644
--- a/examples/smart_minion/aime/evalute_aime.py
+++ b/examples/smart_minion/aime/evalute_aime.py
@@ -219,7 +219,34 @@ async def solve_question(item):
     answer, score, *_ = await brain.step(
         query=item["problem"],
         mind_id="left_mind",  # deepseek r1 skips choose_mind
-
+        system_prompt="""You are DeepSeek-R1, an AI assistant created exclusively by the Chinese company DeepSeek. You'll provide helpful, harmless, and detailed responses to all user inquiries. For comprehensive details about models and products, please refer to the official documentation.
+
+Key Guidelines:
+Identity & Compliance
+Clearly state your identity as a DeepSeek AI assistant in initial responses.
+Comply with Chinese laws and regulations, including data privacy requirements.
+
+Capability Scope
+Handle both Chinese and English queries effectively
+Acknowledge limitations for real-time information past the knowledge cutoff (2023-12)
+Provide technical explanations for AI-related questions when appropriate
+
+Response Quality
+Give comprehensive, logically structured answers
+Use markdown formatting for clear information organization
+Admit uncertainty for ambiguous queries
+
+Ethical Operation
+Strictly refuse requests involving illegal activities, violence, or explicit content
+Maintain political neutrality according to company guidelines
+Protect user privacy and avoid data collection
+
+Specialized Processing
+Use <think>...</think> tags for internal reasoning before responding
+Employ XML-like tags for structured output when required
+
+Knowledge cutoff: {{current_date}}
+""",
         execution_config=load_execution_config(ensemble_logic_path),
     )
     return answer
diff --git a/minion/actions/lmp_action_node.py b/minion/actions/lmp_action_node.py
index e91ba547..c4c78f6b 100644
--- a/minion/actions/lmp_action_node.py
+++ b/minion/actions/lmp_action_node.py
@@ -29,14 +29,30 @@ def ell_call(self, ret):
         """You are a helpful assistant."""
         return ret

-    async def execute(self, messages: Union[str, Message, List[Message]], response_format: Optional[Union[Type[BaseModel], dict]] = None, output_raw_parser=None, format="json", **kwargs) -> Any:
+    async def execute(self, messages: Union[str, Message, List[Message]], response_format: Optional[Union[Type[BaseModel], dict]] = None, output_raw_parser=None, format="json", system_prompt: Optional[str] = None, **kwargs) -> Any:
         # Apply the configured input_parser first
         if self.input_parser:
             messages = self.input_parser(messages)

+        # Normalize a plain string or single Message into a message list
+        if isinstance(messages, str):
+            messages = [Message(role="user", content=messages)]
+        elif isinstance(messages, Message):
+            messages = [messages]
+
+        # Add a system prompt with the following priority:
+        # 1. Explicit system message already in the messages list
+        # 2. The system_prompt parameter
+        # 3. input.system_prompt
+        if not any(msg.role == "system" for msg in messages):
+            if system_prompt is not None:
+                messages.insert(0, Message(role="system", content=system_prompt))
+            elif hasattr(self, 'input') and self.input and self.input.system_prompt:
+                messages.insert(0, Message(role="system", content=self.input.system_prompt))
+
         # Read model settings from llm.config
         api_params = {
-            "temperature": self.llm.config.temperature + random.random() * 0.01,  # add random to avoid prompt caching
+            "temperature": self.llm.config.temperature,  # + random.random() * 0.01, # add random to avoid prompt caching
             "model": self.llm.config.model,
         }

@@ -83,11 +99,6 @@ async def execute(self, messages: Union[str, Message, List[Message]], response_f
             )
             api_params['response_format'] = { "type": "text" }

-        if isinstance(messages, str):
-            messages = [Message(role="user", content=messages)]
-        elif isinstance(messages, Message):
-            messages = [messages]
-
         messages.append(Message(role="user", content=prompt))

         response = await super().execute(messages, **api_params)
diff --git a/minion/main/brain.py b/minion/main/brain.py
index 2c130e5a..c20e981f 100644
--- a/minion/main/brain.py
+++ b/minion/main/brain.py
@@ -119,7 +119,6 @@ def __init__(
         self.python_env = python_env or PythonEnv(image_name, verbose=False, is_agent=True)
         self.stats_storer = stats_storer

-        self.lmp_action_node = LmpActionNode(llm=self.llm)

     def add_mind(self, mind):
         self.minds[mind.id] = mind
@@ -135,10 +134,14 @@ def process_image_input(self, input):
                raise ValueError("input.images should be either a string or a list of strings/images")
            return input.images

-    async def step(self, input=None, query="", query_type="", **kwargs):
+    async def step(self, input=None, query="", query_type="", system_prompt: str = None, **kwargs):
         input = input or Input(query=query, query_type=query_type, query_time=datetime.utcnow(), **kwargs)
         input.query_id = input.query_id or uuid.uuid4()
         input.images = self.process_image_input(input)  # normalize image format to base64
+
+        # Set the system prompt if one was provided
+        if system_prompt is not None:
+            input.system_prompt = system_prompt

         mind_id = input.mind_id or await self.choose_mind(input)
         if mind_id == "left_mind":
@@ -172,7 +175,8 @@ async def choose_mind(self, input):
         filled_template = mind_template.render(minds=self.minds.values(), input=input)

         try:
-            result = await self.lmp_action_node.execute_answer(filled_template)
+            lmp_action_node = LmpActionNode(llm=self.llm)
+            result = await lmp_action_node.execute_answer(filled_template)

             # Ensure the result is a valid mind ID
             if result not in self.minds:
diff --git a/minion/main/worker.py b/minion/main/worker.py
index 690258cf..72942c03 100644
--- a/minion/main/worker.py
+++ b/minion/main/worker.py
@@ -79,7 +79,7 @@ def __init__(self, **kwargs):

     async def execute(self):
         node = LmpActionNode(self.brain.llm)
-        response = await node.execute(self.input.query)
+        response = await node.execute(self.input.query, system_prompt=self.input.system_prompt)
         self.answer_raw = self.input.answer_raw = response
         self.answer = self.input.answer = response
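
Reviewer note: below is a minimal, self-contained sketch of the system-prompt resolution order that the LmpActionNode.execute change above implements. The Message dataclass and the resolve_system_prompt helper are hypothetical stand-ins for illustration and are not part of the patch; only the three-level priority (an existing system message, then the system_prompt argument, then input.system_prompt) is taken from the diff.

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class Message:
        # Hypothetical stand-in for minion's Message type.
        role: str
        content: str

    def resolve_system_prompt(
        messages: List[Message],
        system_prompt: Optional[str] = None,
        input_system_prompt: Optional[str] = None,
    ) -> List[Message]:
        # Same priority as the patched LmpActionNode.execute:
        # 1. a system message already present in `messages` wins,
        # 2. otherwise the explicit system_prompt argument,
        # 3. otherwise the prompt carried on the Input object.
        if not any(msg.role == "system" for msg in messages):
            if system_prompt is not None:
                messages.insert(0, Message(role="system", content=system_prompt))
            elif input_system_prompt:
                messages.insert(0, Message(role="system", content=input_system_prompt))
        return messages

    # brain.step(..., system_prompt=...) as used in evalute_aime.py lands in branch 2:
    msgs = resolve_system_prompt(
        [Message(role="user", content="Solve AIME 2024 problem 1")],
        system_prompt="You are DeepSeek-R1...",
    )
    assert [m.role for m in msgs] == ["system", "user"]

Checking for an existing system message first leaves callers that build their own message lists in control, and insert(0, ...) keeps the system turn at the front, where chat-completion APIs expect it. brain.step stores the prompt on input.system_prompt, which is how the worker in minion/main/worker.py picks it up and forwards it to node.execute.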