From 4a7cfb1cee4727099853ff41fecdb94dd712df56 Mon Sep 17 00:00:00 2001
From: charry
Date: Fri, 21 Nov 2025 16:59:35 +0800
Subject: [PATCH] add system_prompt param in process_query

---
 .../ai_agent/agents/multimodal_large_model_agent.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/scripts/ai_agent/agents/multimodal_large_model_agent.py b/scripts/ai_agent/agents/multimodal_large_model_agent.py
index 92b28e1..e57e97b 100644
--- a/scripts/ai_agent/agents/multimodal_large_model_agent.py
+++ b/scripts/ai_agent/agents/multimodal_large_model_agent.py
@@ -310,6 +310,7 @@ class MMLM_Agent:
                       temperature: float = 0.7,
                       top_p: float = 0.95,
                       stop: Optional[List[str]] = None,
+                      system_prompt: Optional[str] = None,
                       is_use_chat_history:bool = False,
                       is_use_rag:bool = False,
                       is_save_history:bool = False) :
@@ -370,6 +371,14 @@ class MMLM_Agent:
             )
 
             logger.debug(f"Generated prompt: {final_conversation_prompt[:200]}...")  # only show the first 200 characters
+
+            # # 5. Whether to use system_prompt
+            # is_use_system_prompt = False
+            # if is_use_system_prompt:
+            #     system_prompt = ""
+            # else:
+            #     system_prompt = None
+
             ## 6. Call the VLM to generate the answer
             # output = self.llm(
             #     prompt=conversation_prompt,
@@ -385,6 +394,7 @@ class MMLM_Agent:
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
+                system_prompt=system_prompt,
                 stop = stop)
             output = self.model_mag.models_interface.multimodal_inference(request=multi_modal_request)
         else:
@@ -392,6 +402,7 @@ class MMLM_Agent:
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
+                system_prompt=system_prompt,
                 stop = stop)
             output=self.model_mag.models_interface.text_inference(request=text_request)
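
Usage sketch (not part of the patch): the hunks above only show that process_query now accepts an optional system_prompt and forwards it into the multimodal/text inference request. The snippet below is illustrative; the MMLM_Agent constructor arguments, the first positional query argument, and the return value are assumptions, since none of them are visible in this diff.

    # Hypothetical usage -- only temperature, top_p, stop, system_prompt and the
    # is_use_* flags are confirmed by the diff; everything else is assumed here.
    from scripts.ai_agent.agents.multimodal_large_model_agent import MMLM_Agent

    agent = MMLM_Agent()  # assumed default construction

    # Existing behaviour is preserved: system_prompt defaults to None,
    # so current callers need no changes.
    answer = agent.process_query("Summarize the attached report.")

    # New behaviour: the system prompt is passed through unchanged to
    # multimodal_inference / text_inference via the request object.
    answer = agent.process_query(
        "Summarize the attached report.",
        temperature=0.7,
        top_p=0.95,
        system_prompt="You are a concise assistant. Answer in English.",
    )
    print(answer)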