   | """ 聊天提示词模板 演示:如何使用ChatPromptTemplate处理多角色对话 """
  from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_openai import ChatOpenAI from langchain.schema import HumanMessage, AIMessage, SystemMessage from dotenv import load_dotenv import os
  load_dotenv()
 
def example1_basic_chat_template() -> None:
    """Example 1: basic chat template"""
    print("=== Example 1: Basic chat template ===\n")
    chat_template = ChatPromptTemplate.from_messages([
        ("system", "You are a {role}. Your defining trait: {characteristic}"),
        ("human", "{user_input}")
    ])
    messages = chat_template.format_messages(
        role="humorous assistant",
        characteristic="witty, fond of metaphors",
        user_input="What is cloud computing?"
    )
      print("生成的消息:")     for msg in messages:         print(f"{msg.__class__.__name__}: {msg.content}")     print()
 
def example2_chat_with_llm() -> None:
    """Example 2: chat template combined with an LLM"""
    print("=== Example 2: Chat template with an LLM ===\n")
    llm = ChatOpenAI(
        base_url=os.getenv("ALIBABA_BASE_URL"),
        api_key=os.getenv("ALIBABA_API_KEY"),
        model="qwen-plus",
        temperature=0.7,
    )
    customer_service_template = ChatPromptTemplate.from_messages([
        ("system", """
You are a professional customer service agent working for {company}.
Your responsibilities:
1. Answer customer questions in a friendly way
2. Provide accurate information
3. Stay polite and patient
4. Keep answers short and clear
"""),
        ("human", "Customer question: {question}")
    ])
    questions = [
        "What is your return policy?",
        "When will my order ship?",
        "How do I reach a human agent?"
    ]
    for question in questions:
        messages = customer_service_template.format_messages(
            company="Tech Mall",
            question=question
        )
        response = llm.invoke(messages)
        print(f"Customer: {question}")
        print(f"Agent: {response.content}\n")
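    # Side note (a sketch, not part of the original demo): because both the template
    # and the chat model are Runnables, the two steps above can also be composed into
    # a single chain with the | operator and invoked with a dict of variables.
    # The question below is an illustrative one, not from the original list.
    chain = customer_service_template | llm
    chained_response = chain.invoke({
        "company": "Tech Mall",
        "question": "Do you ship internationally?",
    })
    print(f"Chained call: {chained_response.content}\n")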
 
def example3_multi_turn_conversation() -> None:
    """Example 3: multi-turn conversation template"""
    print("=== Example 3: Multi-turn conversation ===\n")
    llm = ChatOpenAI(
        base_url=os.getenv("ALIBABA_BASE_URL"),
        api_key=os.getenv("ALIBABA_API_KEY"),
        model="qwen-plus",
        temperature=0.7,
    )
    chat_template = ChatPromptTemplate.from_messages([
        ("system", "You are a Python programming tutor who is good at explaining programming concepts"),
        MessagesPlaceholder(variable_name="chat_history"),  # slot where earlier turns are injected
        ("human", "{user_input}")
    ])

    chat_history = []
           user_input1 = "什么是列表推导式?"     messages = chat_template.format_messages(         chat_history=chat_history,         user_input=user_input1     )     response1 = llm.invoke(messages)     print(f"学生:{user_input1}")     print(f"导师:{response1.content}\n")
    chat_history.append(HumanMessage(content=user_input1))
    chat_history.append(AIMessage(content=response1.content))
           user_input2 = "能给我举个例子吗?"     messages = chat_template.format_messages(         chat_history=chat_history,         user_input=user_input2     )     response2 = llm.invoke(messages)     print(f"学生:{user_input2}")     print(f"导师:{response2.content}\n")
 
def example4_role_based_template() -> None:
    """Example 4: role-based templates"""
    print("=== Example 4: Conversations with different roles ===\n")
    llm = ChatOpenAI(
        base_url=os.getenv("ALIBABA_BASE_URL"),
        api_key=os.getenv("ALIBABA_API_KEY"),
        model="qwen-plus",
        temperature=0.9,
    )
    roleplay_template = ChatPromptTemplate.from_messages([
        ("system", "You are now playing {character}; stay fully in character"),
        ("human", "Scene: {scenario}"),
        ("human", "{action}")
    ])
    scenarios = [
        {
            "character": "a detective",
            "scenario": "investigating a crime scene",
            "action": "You find a suspicious footprint. What do you do?"
        },
        {
            "character": "a chef",
            "scenario": "preparing dinner in the kitchen",
            "action": "A guest suddenly says they are allergic to seafood. How do you adjust the menu?"
        }
    ]
    for scenario in scenarios:
        messages = roleplay_template.format_messages(**scenario)
        response = llm.invoke(messages)
        print(f"Character: {scenario['character']}")
        print(f"Scene: {scenario['scenario']}")
        print(f"Situation: {scenario['action']}")
        print(f"Response: {response.content}\n")
 
def example5_template_composition() -> None:
    """Example 5: template composition"""
    print("=== Example 5: Combining multiple templates ===\n")
           system_template = "你是一个{profession},专长是{specialty}"
           task_template = """ 任务类型:{task_type} 具体要求:{requirements} """
           full_template = ChatPromptTemplate.from_messages([         ("system", system_template),         ("human", task_template)     ])
    messages = full_template.format_messages(
        profession="data analyst",
        specialty="telling stories with data",
        task_type="analyze sales data",
        requirements="find the reasons for the drop in sales and explain them in plain language"
    )
      print("组合后的消息:")     for msg in messages:         print(f"\n{msg.__class__.__name__}:")         print(msg.content)
 
def main() -> None:
    """Main entry point: run all examples in order"""
    example1_basic_chat_template()
    print("\n" + "="*50 + "\n")

    example2_chat_with_llm()
    print("\n" + "="*50 + "\n")

    example3_multi_turn_conversation()
    print("\n" + "="*50 + "\n")

    example4_role_based_template()
    print("\n" + "="*50 + "\n")

    example5_template_composition()
 
if __name__ == "__main__":
    main()
 