# Assumes `StateGraph` and `END` are imported from `langgraph.graph`, and that
# `GraphState` (the shared state schema) and the `LangGraphNode` base class are
# defined in the preceding sections.
import asyncio
import json


class CustomerServiceWorkflow:
    """Customer-service assistant workflow."""

    def __init__(self, llm_client, knowledge_base):
        self.llm_client = llm_client
        self.knowledge_base = knowledge_base
        self.graph = self.build_customer_service_graph()

    def build_customer_service_graph(self) -> StateGraph:
        """Build the customer-service workflow graph."""
        graph = StateGraph(GraphState)

        graph.add_node("query_classifier", QueryClassificationNode(self.llm_client))
        graph.add_node("information_retrieval", InformationRetrievalNode(self.knowledge_base))
        graph.add_node("solution_generator", SolutionGenerationNode(self.llm_client))
        graph.add_node("response_validator", ResponseValidationNode(self.llm_client))
        graph.add_node("escalation_handler", EscalationHandlerNode())

        graph.add_edge("query_classifier", "information_retrieval")
        graph.add_edge("information_retrieval", "solution_generator")
        graph.add_edge("solution_generator", "response_validator")

        graph.add_conditional_edges(
            "response_validator",
            self.response_routing_decision,
            {
                "valid": END,
                "escalate": "escalation_handler",
                "retry": "solution_generator"
            }
        )

        graph.add_edge("escalation_handler", END)

        graph.set_entry_point("query_classifier")

        return graph

    def response_routing_decision(self, state: GraphState):
        """Decide where to route the validated response."""
        validation_result = state.get('validation_result', {})

        if validation_result.get('is_appropriate', True):
            return "valid"
        elif validation_result.get('requires_human_intervention', False):
            return "escalate"
        else:
            return "retry"


class QueryClassificationNode(LangGraphNode):
    """Query classification node."""

    def __init__(self, llm_client):
        super().__init__("query_classifier", "Classify the type of customer query")
        self.llm_client = llm_client
        self.categories = [
            "billing_inquiry", "technical_support", "product_info",
            "complaint", "account_management", "other"
        ]

    async def execute(self, state: GraphState) -> GraphState:
        """Run query classification."""
        query = state['input_query']

        classification_prompt = f"""
Please classify the following customer query into one of these categories:
{', '.join(self.categories)}

Query: {query}

Return only the category name, with no additional explanation.
"""

        try:
            category = await self.llm_client.acall(classification_prompt)
            category = category.strip().lower()

            # Fall back to "other" for anything outside the known categories
            if category not in self.categories:
                category = "other"

            state['metadata']['query_category'] = category

            intermediate_step = {
                "node": self.name,
                "action": "classification",
                "input": query,
                "output": category,
                "timestamp": asyncio.get_event_loop().time()
            }

            state['intermediate_steps'].append(intermediate_step)
            state['execution_history'].append(f"{self.name}: Classified as {category}")

            return state

        except Exception as e:
            state['error'] = f"Classification error: {str(e)}"
            state['execution_history'].append(f"{self.name}: Error - {str(e)}")
            state['metadata']['query_category'] = "other"
            return state
class InformationRetrievalNode(LangGraphNode):
    """Information retrieval node."""

    def __init__(self, knowledge_base):
        super().__init__("information_retrieval", "Retrieve relevant information from the knowledge base")
        self.knowledge_base = knowledge_base

    async def execute(self, state: GraphState) -> GraphState:
        """Run information retrieval."""
        query = state['input_query']
        category = state['metadata'].get('query_category', 'other')

        try:
            relevant_docs = await self.knowledge_base.retrieve_relevant_docs(query, category)

            context = state.get('context', '')
            new_context = f"{context}\n\nRelevant knowledge base information:\n"

            # Keep only the top five documents to bound prompt length
            for doc in relevant_docs[:5]:
                new_context += f"- {doc.content}\n"

            state['context'] = new_context

            intermediate_step = {
                "node": self.name,
                "action": "retrieval",
                "input": query,
                "output": f"Retrieved {len(relevant_docs)} documents",
                "timestamp": asyncio.get_event_loop().time()
            }

            state['intermediate_steps'].append(intermediate_step)
            state['execution_history'].append(f"{self.name}: Retrieved {len(relevant_docs)} documents")

            return state

        except Exception as e:
            state['error'] = f"Information retrieval error: {str(e)}"
            state['execution_history'].append(f"{self.name}: Error - {str(e)}")
            return state
class SolutionGenerationNode(LangGraphNode):
    """Solution generation node."""

    def __init__(self, llm_client):
        super().__init__("solution_generator", "Generate a solution for the customer's problem")
        self.llm_client = llm_client

    async def execute(self, state: GraphState) -> GraphState:
        """Run solution generation."""
        query = state['input_query']
        context = state['context']
        category = state['metadata'].get('query_category', 'other')

        solution_prompt = f"""
As a professional customer service representative, please provide a solution for the following customer problem:

Customer problem: {query}

Relevant information: {context}

Problem category: {category}

Please provide:
1. A direct solution or answer
2. Step-by-step instructions (if applicable)
3. The expected outcome
4. Caveats or disclaimers

Keep a friendly, professional tone.
"""

        try:
            solution = await self.llm_client.acall(solution_prompt)

            state['final_answer'] = solution

            intermediate_step = {
                "node": self.name,
                "action": "solution_generation",
                "input": query,
                "output": solution[:100] + "...",
                "timestamp": asyncio.get_event_loop().time()
            }

            state['intermediate_steps'].append(intermediate_step)
            state['execution_history'].append(f"{self.name}: Generated solution")

            return state

        except Exception as e:
            state['error'] = f"Solution generation error: {str(e)}"
            state['execution_history'].append(f"{self.name}: Error - {str(e)}")
            return state
class ResponseValidationNode(LangGraphNode):
    """Response validation node."""

    def __init__(self, llm_client):
        super().__init__("response_validator", "Validate that the response is appropriate")
        self.llm_client = llm_client
        self.appropriateness_threshold = 0.8
        self.completeness_threshold = 0.7

    async def execute(self, state: GraphState) -> GraphState:
        """Run response validation."""
        solution = state.get('final_answer', '')
        query = state['input_query']

        try:
            validation_prompt = f"""
Please evaluate the quality of the following customer service response:

Customer problem: {query}
Customer service response: {solution}

Please assess:
1. Relevance (score 0-1)
2. Completeness (score 0-1)
3. Professionalism (score 0-1)
4. Whether human intervention is required (yes/no)

Return the evaluation as JSON with the keys "relevance", "completeness",
"professionalism", "is_appropriate" and "requires_human_intervention".
"""

            validation_result = await self.llm_client.acall(validation_prompt)

            try:
                eval_result = json.loads(validation_result)
            except json.JSONDecodeError:
                # If the LLM output is not valid JSON, fail safe and escalate
                eval_result = {"is_appropriate": False, "requires_human_intervention": True}

            state['validation_result'] = eval_result

            intermediate_step = {
                "node": self.name,
                "action": "validation",
                "input": solution,
                "output": eval_result,
                "timestamp": asyncio.get_event_loop().time()
            }

            state['intermediate_steps'].append(intermediate_step)
            state['execution_history'].append(f"{self.name}: Validation complete")

            return state

        except Exception as e:
            state['error'] = f"Response validation error: {str(e)}"
            state['execution_history'].append(f"{self.name}: Error - {str(e)}")
            return state
class EscalationHandlerNode(LangGraphNode):
    """Escalation handler node."""

    def __init__(self):
        super().__init__("escalation_handler", "Handle cases that need human intervention")
        self.escalation_reasons = []

    async def execute(self, state: GraphState) -> GraphState:
        """Run escalation handling."""
        validation_result = state.get('validation_result', {})
        original_query = state['input_query']

        escalation_info = {
            "original_query": original_query,
            "validation_result": validation_result,
            "escalation_timestamp": asyncio.get_event_loop().time(),
            "reason": "High complexity or sensitive issue"
        }

        state['escalation_info'] = escalation_info

        human_readable_info = f"""
Customer query requiring human handling:

Original query: {original_query}

Evaluation result: {json.dumps(validation_result, ensure_ascii=False, indent=2)}

Please handle this query manually and provide a solution.
"""

        state['final_answer'] = human_readable_info

        intermediate_step = {
            "node": self.name,
            "action": "escalation",
            "input": original_query,
            "output": escalation_info,
            "timestamp": asyncio.get_event_loop().time()
        }

        state['intermediate_steps'].append(intermediate_step)
        state['execution_history'].append(f"{self.name}: Escalated to human agent")

        return state