JatsTheAIGen committed
Commit 7506c11 · Parent(s): 6d41cb5

skills utilized for enhanced response synthesis v1
src/agents/synthesis_agent.py CHANGED
@@ -1,620 +1,557 @@
  """
- Response Synthesis Agent
- Specialized in integrating multiple agent outputs into coherent responses
  """

  import logging
- from typing import Dict, Any, List
  import re

  logger = logging.getLogger(__name__)

- class ResponseSynthesisAgent:
-     def __init__(self, llm_router=None):
-         self.llm_router = llm_router
-         self.agent_id = "RESP_SYNTH_001"
-         self.specialization = "Multi-source information integration and coherent response generation"
-         self._current_user_input = None
-
-         # Response templates for different intent types
-         self.response_templates = {
-             "information_request": {
-                 "structure": "introduction → key_points → conclusion",
-                 "tone": "informative, clear, authoritative"
-             },
-             "task_execution": {
-                 "structure": "confirmation → steps → expected_outcome",
-                 "tone": "action-oriented, precise, reassuring"
-             },
-             "creative_generation": {
-                 "structure": "concept → development → refinement",
-                 "tone": "creative, engaging, expressive"
-             },
-             "analysis_research": {
-                 "structure": "hypothesis → analysis → insights",
-                 "tone": "analytical, evidence-based, objective"
-             },
-             "casual_conversation": {
-                 "structure": "engagement → response → follow_up",
-                 "tone": "friendly, conversational, natural"
-             }
          }
-     async def execute(self, agent_outputs: List[Dict[str, Any]], user_input: str,
-                       context: Dict[str, Any] = None, **kwargs) -> Dict[str, Any]:
          """
-         Synthesize responses from multiple agent outputs
-         """
-         try:
-             logger.info(f"{self.agent_id} synthesizing {len(agent_outputs)} agent outputs")
-
-             # Log context for debugging
-             if context:
-                 logger.info(f"{self.agent_id} context has {len(context.get('interactions', []))} interactions")
-
-             # Extract intent information
-             intent_info = self._extract_intent_info(agent_outputs)
-             primary_intent = intent_info.get('primary_intent', 'casual_conversation')
-
-             # Store user_input for use in synthesis
-             self._current_user_input = user_input
-
-             # Structure the synthesis process
-             synthesis_result = await self._synthesize_response(
-                 agent_outputs, user_input, context, primary_intent
-             )
-
-             # Add quality metrics
-             synthesis_result.update({
-                 "agent_id": self.agent_id,
-                 "synthesis_quality_metrics": self._calculate_quality_metrics(synthesis_result),
-                 "intent_alignment": self._check_intent_alignment(synthesis_result, intent_info)
-             })
-
-             logger.info(f"{self.agent_id} completed synthesis")
-             return synthesis_result
-
-         except Exception as e:
-             logger.error(f"{self.agent_id} synthesis error: {str(e)}", exc_info=True)
-             return self._get_fallback_response(user_input, agent_outputs)
-
-     async def _synthesize_response(self, agent_outputs: List[Dict[str, Any]],
-                                    user_input: str, context: Dict[str, Any],
-                                    primary_intent: str) -> Dict[str, Any]:
-         """Synthesize responses using appropriate method based on intent"""
-
-         if self.llm_router:
-             # Use LLM for sophisticated synthesis
-             return await self._llm_based_synthesis(agent_outputs, user_input, context, primary_intent)
-         else:
-             # Use template-based synthesis
-             return await self._template_based_synthesis(agent_outputs, user_input, primary_intent)
-
-     async def _llm_based_synthesis(self, agent_outputs: List[Dict[str, Any]],
-                                    user_input: str, context: Dict[str, Any],
-                                    primary_intent: str) -> Dict[str, Any]:
-         """Use LLM for sophisticated response synthesis"""
-
-         synthesis_prompt = await self._build_synthesis_prompt(agent_outputs, user_input, context, primary_intent)
-
-         try:
-             # Call actual LLM for response generation
-             if self.llm_router:
-                 logger.info(f"{self.agent_id} calling LLM for response synthesis")
-                 llm_response = await self.llm_router.route_inference(
-                     task_type="response_synthesis",
-                     prompt=synthesis_prompt,
-                     max_tokens=2000,  # Updated to match model config
-                     temperature=0.7
-                 )
-
-                 if llm_response and isinstance(llm_response, str) and len(llm_response.strip()) > 0:
-                     # Clean up the response
-                     clean_response = llm_response.strip()
-                     logger.info(f"{self.agent_id} received LLM response (length: {len(clean_response)})")
-                     logger.info("=" * 80)
-                     logger.info("SYNTHESIS AGENT - COMPLETE LLM RESPONSE:")
-                     logger.info("=" * 80)
-                     logger.info(f"Agent: {self.agent_id}")
-                     logger.info(f"Task: response_synthesis")
-                     logger.info(f"Response Length: {len(clean_response)} characters")
-                     logger.info("-" * 40)
-                     logger.info("FULL LLM RESPONSE CONTENT:")
-                     logger.info("-" * 40)
-                     logger.info(clean_response)
-                     logger.info("-" * 40)
-                     logger.info("END OF SYNTHESIS LLM RESPONSE")
-                     logger.info("=" * 80)
-
-                     return {
-                         "draft_response": clean_response,
-                         "final_response": clean_response,
-                         "source_references": self._extract_source_references(agent_outputs),
-                         "coherence_score": 0.90,
-                         "improvement_opportunities": self._identify_improvements(clean_response),
-                         "synthesis_method": "llm_enhanced"
-                     }
-                 else:
-                     # LLM returned empty or None - use fallback
-                     logger.warning(f"{self.agent_id} LLM returned empty/invalid response, using template")
-         except Exception as e:
-             logger.error(f"{self.agent_id} LLM call failed: {e}, falling back to template")
-
-         # Fallback to template-based if LLM fails
-         synthesized_response = await self._template_based_synthesis(agent_outputs, user_input, primary_intent)
-         draft_response = synthesized_response["final_response"]
-
-         # Enhance the template response to make it more relevant
-         enhanced_response = self._enhance_response_quality(draft_response, primary_intent)
-
          return {
-             "draft_response": draft_response,
-             "final_response": enhanced_response,
-             "source_references": self._extract_source_references(agent_outputs),
-             "coherence_score": 0.75,
-             "improvement_opportunities": self._identify_improvements(enhanced_response),
-             "synthesis_method": "template_enhanced"
          }
-     async def _template_based_synthesis(self, agent_outputs: List[Dict[str, Any]],
-                                         user_input: str, primary_intent: str) -> Dict[str, Any]:
-         """Template-based response synthesis"""
-
-         template = self.response_templates.get(primary_intent, self.response_templates["casual_conversation"])
-
-         # Extract relevant content from agent outputs
-         content_blocks = self._extract_content_blocks(agent_outputs)
-
-         # Apply template structure
-         structured_response = self._apply_response_template(content_blocks, template, primary_intent)
-
-         # Ensure we have a response even if no content blocks
-         if not structured_response or len(structured_response.strip()) == 0:
-             structured_response = f"Thank you for your message: '{user_input}'. I'm working on understanding how to best help you with this."
-
          return {
-             "draft_response": structured_response,
-             "final_response": structured_response,  # No enhancement in template mode
-             "source_references": self._extract_source_references(agent_outputs),
-             "coherence_score": 0.75,
-             "improvement_opportunities": ["Consider adding more specific details"] if content_blocks else ["Need more agent inputs"],
-             "synthesis_method": "template_based"
          }
-     async def _build_synthesis_prompt(self, agent_outputs: List[Dict[str, Any]],
-                                       user_input: str, context: Dict[str, Any],
-                                       primary_intent: str) -> str:
-         """Build prompt for LLM-based synthesis - optimized for Qwen instruct format with context"""
-
-         # Build a comprehensive prompt for actual LLM generation
-         agent_content = self._format_agent_outputs_for_synthesis(agent_outputs)
-
-         # Extract conversation history for context (moving window strategy)
-         conversation_history = ""
-         if context and context.get('interactions'):
-             recent_interactions = context.get('interactions', [])[:40]  # Last 40 interactions from memory buffer
-             if recent_interactions:
-                 # Split into: recent (last 10) + older (all remaining, LLM summarized)
-                 if len(recent_interactions) > 10:
-                     oldest_interactions = recent_interactions[10:]  # All older interactions
-                     newest_interactions = recent_interactions[:10]  # Last 10 (newest)
-
-                     # Summarize ALL older interactions using LLM (no fallback)
-                     summary = await self._summarize_interactions(oldest_interactions)
-
-                     conversation_history = f"\n\nConversation Summary (earlier context):\n{summary}\n\n"
-                     conversation_history += "Recent conversation details:\n"
-
-                     # Include recent 10 interactions in full detail
-                     for i, interaction in enumerate(reversed(newest_interactions), 1):
-                         user_msg = interaction.get('user_input', '')
-                         if user_msg:
-                             conversation_history += f"Q{i}: {user_msg}\n"
-                         response = interaction.get('response', '')
-                         if response:
-                             conversation_history += f"A{i}: {response}\n"
-                         conversation_history += "\n"
-                 else:
-                     # 10 or fewer interactions, show all in detail
-                     conversation_history = "\n\nPrevious conversation:\n"
-                     for i, interaction in enumerate(reversed(recent_interactions), 1):
-                         user_msg = interaction.get('user_input', '')
-                         if user_msg:
-                             conversation_history += f"Q{i}: {user_msg}\n"
-                         response = interaction.get('response', '')
-                         if response:
-                             conversation_history += f"A{i}: {response}\n"
-                         conversation_history += "\n"
-
-         # Qwen instruct format with conversation history
-         prompt = f"""User Question: {user_input}
- {conversation_history}
- {agent_content if agent_content else ""}
-
- Instructions: Provide a comprehensive, helpful response that directly addresses the question. If there's conversation context, use it to answer the current question appropriately. Be detailed and informative.
-
- Response:"""
-
-         return prompt
-     async def _summarize_interactions(self, interactions: List[Dict[str, Any]]) -> str:
-         """Summarize older interactions using LLM third-person narrative (NO FALLBACK)"""
-         if not interactions:
-             return ""
-
-         # Use LLM-based narrative summarization ONLY (no fallback)
-         llm_summary = await self._generate_narrative_summary(interactions)
-
-         if llm_summary and len(llm_summary.strip()) > 20:
-             return llm_summary
-         else:
-             # If LLM fails, return minimal placeholder
-             return f"Earlier conversation included {len(interactions)} interactions covering various topics."
-
-     async def _generate_narrative_summary(self, interactions: List[Dict[str, Any]]) -> str:
-         """Use LLM to generate a third-person narrative summary of the conversation"""
-         if not interactions or not self.llm_router:
-             return ""
-
-         # Build conversation transcript for LLM
-         conversation_text = "Conversation History:\n"
-         for i, interaction in enumerate(interactions, 1):
-             user_msg = interaction.get('user_input', '')
-             response = interaction.get('response', '')
-
-             conversation_text += f"\nTurn {i}:\n"
-             if user_msg:
-                 conversation_text += f"User: {user_msg}\n"
-             if response:
-                 conversation_text += f"Assistant: {response[:200]}\n"  # First 200 chars of response
-
-         # Prompt for third-person narrative
-         prompt = f"""{conversation_text}
-
- Task: Write a brief third-person narrative summary (2-3 sentences) of this conversation.
-
- The summary should:
- - Use third-person perspective ("The user started...", "The AI assistant responded...")
- - Capture the flow and progression of the conversation
- - Highlight key topics and themes
- - Be concise but informative
-
- Summary:"""

          try:
-             import asyncio
-             summary = await self.llm_router.route_inference(
                  task_type="response_synthesis",
-                 prompt=prompt,
-                 max_tokens=300,
-                 temperature=0.5
              )

-             if summary and isinstance(summary, str):
-                 # Clean up the summary
-                 clean_summary = summary.strip()
-                 # Remove any "Summary:" prefix if present
-                 if clean_summary.startswith("Summary:"):
-                     clean_summary = clean_summary[9:].strip()
-
-                 # Log the complete narrative summary response
-                 logger.info("=" * 80)
-                 logger.info("NARRATIVE SUMMARY - COMPLETE LLM RESPONSE:")
-                 logger.info("=" * 80)
-                 logger.info(f"Agent: {self.agent_id}")
-                 logger.info(f"Task: narrative_summary")
-                 logger.info(f"Interactions Count: {len(interactions)}")
-                 logger.info(f"Summary Length: {len(clean_summary)} characters")
-                 logger.info("-" * 40)
-                 logger.info("FULL NARRATIVE SUMMARY CONTENT:")
-                 logger.info("-" * 40)
-                 logger.info(clean_summary)
-                 logger.info("-" * 40)
-                 logger.info("END OF NARRATIVE SUMMARY RESPONSE")
-                 logger.info("=" * 80)
-
-                 return clean_summary
-
-         except Exception as e:
-             logger.error(f"{self.agent_id} narrative summary generation failed: {e}")
-
-         return ""
-     def _extract_intent_info(self, agent_outputs: List[Dict[str, Any]]) -> Dict[str, Any]:
-         """Extract intent information from agent outputs"""
-         for output in agent_outputs:
-             if 'primary_intent' in output:
-                 return {
-                     'primary_intent': output['primary_intent'],
-                     'confidence': output.get('confidence_scores', {}).get(output['primary_intent'], 0.5),
-                     'source_agent': output.get('agent_id', 'unknown')
                  }
-         return {'primary_intent': 'casual_conversation', 'confidence': 0.5}

-     def _extract_content_blocks(self, agent_outputs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
-         """Extract content blocks from agent outputs for synthesis"""
-         content_blocks = []
-
-         for output in agent_outputs:
-             if 'result' in output:
-                 content_blocks.append({
-                     'content': output['result'],
-                     'source': output.get('agent_id', 'unknown'),
-                     'confidence': output.get('confidence', 0.5)
-                 })
-             elif 'primary_intent' in output:
-                 content_blocks.append({
-                     'content': f"Intent analysis: {output['primary_intent']}",
-                     'source': output.get('agent_id', 'intent_agent'),
-                     'confidence': output.get('confidence_scores', {}).get(output['primary_intent'], 0.5)
-                 })
-             elif 'final_response' in output:
-                 content_blocks.append({
-                     'content': output['final_response'],
-                     'source': output.get('agent_id', 'unknown'),
-                     'confidence': output.get('confidence_score', 0.7)
-                 })
-
-         return content_blocks

-     def _apply_response_template(self, content_blocks: List[Dict[str, Any]],
-                                  template: Dict[str, str], intent: str) -> str:
-         """Apply response template to structure the content"""
-
-         if intent == "information_request":
-             return self._structure_informative_response(content_blocks)
-         elif intent == "task_execution":
-             return self._structure_actionable_response(content_blocks)
          else:
-             return self._structure_conversational_response(content_blocks, self._current_user_input)
-     def _structure_informative_response(self, content_blocks: List[Dict[str, Any]]) -> str:
-         """Structure an informative response (intro → key_points → conclusion)"""
-         if not content_blocks:
-             return "I'm here to help! Could you provide more details about what you're looking for?"
-
-         intro = f"Based on the information available"
-         key_points = "\n".join([f"• {block['content']}" for block in content_blocks[:3]])
-         conclusion = "I hope this helps! Let me know if you need any clarification."
-
-         return f"{intro}:\n\n{key_points}\n\n{conclusion}"
-
-     def _structure_actionable_response(self, content_blocks: List[Dict[str, Any]]) -> str:
-         """Structure an actionable response (confirmation → steps → outcome)"""
-         if not content_blocks:
-             return "I understand you'd like some help. What specific task would you like to accomplish?"
-
-         confirmation = "I can help with that!"
-         steps = "\n".join([f"{i+1}. {block['content']}" for i, block in enumerate(content_blocks[:5])])
-         outcome = "This should help you get started. Feel free to ask if you need further assistance."
-
-         return f"{confirmation}\n\n{steps}\n\n{outcome}"
-
-     def _structure_conversational_response(self, content_blocks: List[Dict[str, Any]], user_input: str = None) -> str:
-         """Structure a conversational response with context-aware content"""
-         if not content_blocks:
-             # Generate a meaningful, context-aware response based on user input
-             if user_input:
-                 return self._generate_intelligent_response(user_input)
-             return "I'm here to help! Could you provide more details about what you're looking for?"
-
-         # Combine content naturally
-         combined_content = " ".join([block['content'] for block in content_blocks])
-         if len(combined_content) == 0:
-             return self._generate_intelligent_response(user_input) if user_input else "I'm here to help. Could you tell me more about what you're looking for?"
-         return combined_content[:500] + "..." if len(combined_content) > 500 else combined_content
-     def _generate_intelligent_response(self, user_input: str) -> str:
-         """Generate an intelligent, context-aware response based on user input"""
-         input_lower = user_input.lower()
-
-         if "agentic ai" in input_lower or "agentic" in input_lower:
-             return """Here's a practical guide to mastering Agentic AI as a data science professional:
-
- **1. Foundational Understanding**
- - Study autonomous agent architectures (ReAct, Tool Use, Multi-Agent patterns)
- - Understand the agent reasoning loop: Perception → Decision → Action
- - Learn how agents maintain state and context across interactions
-
- **2. Implementation Practice**
- - Start with frameworks like LangChain or AutoGen for building agent systems
- - Build simple agents that use tools (search, computation, databases)
- - Progress to multi-agent systems where agents collaborate
- - Implement agent memory and learning mechanisms
-
- **3. Real-World Application**
- - Apply agentic principles to data science workflows (auto-EDA, model selection)
- - Build intelligent data processing pipelines with agent-based orchestration
- - Create autonomous model monitoring and retraining systems
-
- **4. Advanced Concepts**
- - Implement reasoning engines (Chain of Thought, Tree of Thoughts)
- - Build agent collaboration patterns (supervisor → worker, hierarchical)
- - Add reflection and self-correction capabilities
-
- **Resources**: Research papers on agentic systems, LangChain documentation, and multi-agent frameworks like AutoGen.
-
- Would you like me to dive deeper into any specific aspect?"""
-
-         elif "master" in input_lower and "implement" in input_lower:
-             return """To master implementation skills systematically:
-
- **1. Start with Basics**
- - Build a simple implementation from scratch to understand core concepts
- - Study existing implementations and their design patterns
- - Identify common pitfalls and best practices
-
- **2. Progressive Complexity**
- - Implement increasingly complex features step by step
- - Test edge cases and handle error scenarios
- - Optimize for performance and maintainability
-
- **3. Real-World Practice**
- - Work on actual projects, not just tutorials
- - Contribute to open-source to get feedback
- - Build a portfolio showcasing your implementations
-
- **4. Advanced Techniques**
- - Study different architectural approaches
- - Learn about testing, documentation, and deployment
- - Understand scalability and production considerations
-
- Would you like specific guidance on implementation approaches or best practices?"""
          else:
-             # Generate a substantive answer based on the question
-             return self._generate_substantive_answer(user_input)
-     def _generate_substantive_answer(self, user_input: str) -> str:
-         """Generate a substantive answer based on the topic"""
-         input_lower = user_input.lower()
-
-         # Knowledge base for common queries
-         if "gemini" in input_lower and "google" in input_lower:
-             return """Google's Gemini chatbot is built on their Gemini family of multimodal AI models. Here are the key features:
-
- **1. Multimodal Capabilities**
- - Processes text, images, audio, video, and code simultaneously
- - Understands and generates content across different modalities
- - Supports seamless integration of visual and textual understanding
-
- **2. Three Model Sizes**
- - Gemini Ultra: Most capable for complex tasks
- - Gemini Pro: Balanced performance for general use
- - Gemini Nano: Efficient on-device processing
-
- **3. Advanced Reasoning**
- - Chain-of-thought reasoning for complex problem-solving
- - Tool use and function calling for real-world applications
- - Code generation with multiple programming languages
-
- **4. Integration Features**
- - Google Workspace integration (Docs, Sheets, Slides)
- - YouTube content understanding and summarization
- - Real-time web search capabilities
- - Code execution in multiple languages
-
- **5. Developer Platform**
- - API access for building custom applications
- - Function calling for structured outputs
- - Streaming responses for better UX
- - Context window up to 1 million tokens (experimental)
-
- **6. Safety & Alignment**
- - Built-in safety filters and content moderation
- - Responsible AI practices and bias mitigation
- - Transparency in AI decision-making
-
- The chatbot excels at combining multiple capabilities like understanding uploaded images, searching the web, coding, and providing detailed explanations."""
-
-         elif any(keyword in input_lower for keyword in ["key features", "what can", "capabilities"]):
-             # Generic but substantive features response
-             return """Here are key capabilities I can help with:
-
- **Research & Analysis**
- - Synthesize information from multiple sources
- - Analyze complex topics and provide structured insights
- - Conduct literature reviews and summarize findings
- - Compare different approaches or methods
-
- **Content Generation**
- - Create detailed explanations and tutorials
- - Generate code examples and implementations
- - Write comprehensive documentation
- - Develop learning paths and guides
-
- **Problem-Solving**
- - Break down complex problems into steps
- - Propose solutions with trade-offs analysis
- - Debug code and suggest improvements
- - Design systems and architectures
-
- **Multi-Modal Understanding**
- - Process and discuss images, data, and text
- - Extract insights from visual content
- - Combine information from different modalities
- - Generate multimodal responses
-
- How can I assist you with a specific task or question?"""
-
-         else:
-             # Provide a helpful, direct answer attempt
-             return f"""Let me address your question: "{user_input}"
-
- To provide you with the most accurate and helpful information, could you clarify:
-
- 1. What specific aspect would you like me to focus on?
- 2. What level of detail do you need? (Brief overview, detailed explanation, or step-by-step guide)
- 3. Are you looking for practical implementation guidance, theoretical concepts, or both?
-
- Alternatively, you can rephrase your question with more specific details, and I'll provide a comprehensive answer."""
-     def _enhance_response_quality(self, response: str, intent: str) -> str:
-         """Enhance response quality to ensure substantive content"""
-         enhanced = response
-
-         # If response is too short or generic, enrich it with context
-         if self._current_user_input and len(response.split()) < 50:
-             if intent == "information_request" or intent == "analysis_research":
-                 # Try to enhance with relevant knowledge
-                 enhancement = self._get_topic_knowledge(self._current_user_input)
-                 if enhancement:
-                     enhanced += "\n\n" + enhancement
-
-         # Ensure minimum substance
-         if len(enhanced.split()) < 30:
-             enhanced += "\n\nWould you like me to elaborate on any specific aspect of this topic?"
-
-         return enhanced
-
-     def _get_topic_knowledge(self, user_input: str) -> str:
-         """Get knowledge snippets for various topics"""
-         input_lower = user_input.lower()
-
-         if "machine learning" in input_lower or "ml" in input_lower:
-             return """**Machine Learning Fundamentals:**
- - Supervised Learning: Models learn from labeled data (classification, regression)
- - Unsupervised Learning: Finding patterns in unlabeled data (clustering, dimensionality reduction)
- - Reinforcement Learning: Learning through rewards and punishments
- - Deep Learning: Neural networks with multiple layers for complex pattern recognition
- - Key algorithms include: Decision Trees, SVM, Random Forest, Neural Networks, Transformers"""
-
-         elif "deep learning" in input_lower or "neural network" in input_lower:
-             return """**Deep Learning Essentials:**
- - Convolutional Neural Networks (CNNs): Best for image recognition
- - Recurrent Neural Networks (RNNs/LSTMs): For sequential data like text
- - Transformers: Modern architecture for NLP tasks
- - Key frameworks: TensorFlow, PyTorch, Keras
- - Applications: Computer vision, NLP, speech recognition, recommendation systems"""
-
-         elif "data science" in input_lower:
-             return """**Data Science Workflow:**
- - Data Collection: Gathering relevant data from various sources
- - Data Cleaning: Removing errors, handling missing values
- - Exploratory Data Analysis: Understanding patterns and relationships
- - Feature Engineering: Creating meaningful input variables
- - Model Building: Selecting and training appropriate models
- - Evaluation & Deployment: Testing and productionizing solutions"""
-
-         elif "nlp" in input_lower or "natural language" in input_lower:
-             return """**Natural Language Processing:**
- - Tokenization: Breaking text into words/subwords
- - Embeddings: Converting words to dense vector representations (Word2Vec, GloVe, BERT)
- - Named Entity Recognition: Identifying people, places, organizations
- - Sentiment Analysis: Understanding emotional tone
- - Machine Translation: Converting between languages
- - Modern approach: Large Language Models (GPT, BERT, Llama) with transfer learning"""
-
-         elif "ai" in input_lower and "trends" in input_lower:
-             return """**Current AI Trends:**
- - Large Language Models (LLMs): GPT-4, Claude, Gemini for text generation
- - Multimodal AI: Processing text, images, audio simultaneously
- - Generative AI: Creating new content (text, images, code, music)
- - Autonomous Agents: AI systems that can act independently
- - Edge AI: Running models on devices for privacy and speed
- - Responsible AI: Fairness, ethics, and safety in AI systems"""
-
-         return ""
      def _extract_source_references(self, agent_outputs: List[Dict[str, Any]]) -> List[str]:
          """Extract source references from agent outputs"""
@@ -624,30 +561,25 @@ Alternatively, you can rephrase your question with more specific details, and I'
              sources.append(agent_id)
          return list(set(sources))  # Remove duplicates

-     def _format_agent_outputs_for_synthesis(self, agent_outputs: List[Dict[str, Any]]) -> str:
-         """Format agent outputs for LLM synthesis prompt"""
-         formatted = []
-         for i, output in enumerate(agent_outputs, 1):
-             agent_id = output.get('agent_id', 'unknown')
-             content = output.get('result', output.get('final_response', str(output)))
-             formatted.append(f"Agent {i} ({agent_id}): {content[:100]}...")
-         return "\n".join(formatted)
-
      def _calculate_quality_metrics(self, synthesis_result: Dict[str, Any]) -> Dict[str, Any]:
          """Calculate quality metrics for synthesis"""
          response = synthesis_result.get('final_response', '')

          return {
              "length": len(response),
-             "word_count": len(response.split()),
              "coherence_score": synthesis_result.get('coherence_score', 0.7),
              "source_count": len(synthesis_result.get('source_references', [])),
-             "has_structured_elements": bool(re.search(r'[•\d+\.]', response))
          }

      def _check_intent_alignment(self, synthesis_result: Dict[str, Any], intent_info: Dict[str, Any]) -> Dict[str, Any]:
          """Check if synthesis aligns with detected intent"""
-         alignment_score = 0.8  # Placeholder

          return {
              "intent_detected": intent_info.get('primary_intent'),
@@ -667,41 +599,36 @@ Alternatively, you can rephrase your question with more specific details, and I'

          return improvements

-     def _get_fallback_response(self, user_input: str, agent_outputs: List[Dict[str, Any]]) -> Dict[str, Any]:
-         """Provide substantive response even when synthesis fails"""
-         # Generate a real response using our knowledge
-         try:
-             response = self._generate_intelligent_response(user_input)
-             response = self._enhance_response_quality(response, "information_request")
-
-             return {
-                 "final_response": response,
-                 "draft_response": response,
-                 "source_references": self._extract_source_references(agent_outputs),
-                 "coherence_score": 0.70,
-                 "improvement_opportunities": [],
-                 "synthesis_method": "knowledge_base",
-                 "agent_id": self.agent_id,
-                 "synthesis_quality_metrics": self._calculate_quality_metrics({"final_response": response}),
-                 "intent_alignment": {"intent_detected": "information_request", "alignment_score": 0.75, "alignment_verified": True},
-                 "fallback_mode": True
-             }
-         except Exception as e:
-             logger.error(f"Fallback response generation failed: {e}")
-             return {
-                 "final_response": f"Thank you for your question: '{user_input}'. I'm processing your request and will provide a detailed response shortly.",
-                 "draft_response": "",
-                 "source_references": [],
-                 "coherence_score": 0.5,
-                 "improvement_opportunities": ["Fallback mode active"],
-                 "synthesis_method": "emergency_fallback",
-                 "agent_id": self.agent_id,
-                 "synthesis_quality_metrics": {"error": "emergency_mode"},
-                 "intent_alignment": {"error": "system_recovery"},
-                 "error_handled": True
-             }

- # Factory function for easy instantiation
- def create_synthesis_agent(llm_router=None):
-     return ResponseSynthesisAgent(llm_router)
  """
+ Enhanced Synthesis Agent with Expert Consultant Assignment
+ Based on skill probability scores from Skills Identification Agent
  """

  import logging
+ import json
+ from typing import Dict, List, Any, Optional, Tuple
+ from datetime import datetime
  import re

  logger = logging.getLogger(__name__)

+
+ class ExpertConsultantAssigner:
+     """
+     Assigns expert consultant profiles based on skill probabilities
+     and generates weighted expertise for response synthesis
+     """
+
+     # Expert consultant profiles with skill mappings
+     EXPERT_PROFILES = {
+         "data_analysis": {
+             "title": "Senior Data Analytics Consultant",
+             "expertise": ["Statistical Analysis", "Data Visualization", "Business Intelligence", "Predictive Modeling"],
+             "background": "15+ years in data science across finance, healthcare, and tech sectors",
+             "style": "methodical, evidence-based, quantitative reasoning"
+         },
+         "technical_programming": {
+             "title": "Principal Software Engineering Consultant",
+             "expertise": ["Full-Stack Development", "System Architecture", "DevOps", "Code Optimization"],
+             "background": "20+ years leading technical teams at Fortune 500 companies",
+             "style": "practical, solution-oriented, best practices focused"
+         },
+         "project_management": {
+             "title": "Strategic Project Management Consultant",
+             "expertise": ["Agile/Scrum", "Risk Management", "Stakeholder Communication", "Resource Optimization"],
+             "background": "12+ years managing complex enterprise projects across industries",
+             "style": "structured, process-driven, outcome-focused"
+         },
+         "financial_analysis": {
+             "title": "Executive Financial Strategy Consultant",
+             "expertise": ["Financial Modeling", "Investment Analysis", "Risk Assessment", "Corporate Finance"],
+             "background": "18+ years in investment banking and corporate finance advisory",
+             "style": "analytical, risk-aware, ROI-focused"
+         },
+         "digital_marketing": {
+             "title": "Chief Marketing Strategy Consultant",
+             "expertise": ["Digital Campaign Strategy", "Customer Analytics", "Brand Development", "Growth Hacking"],
+             "background": "14+ years scaling marketing for startups to enterprise clients",
+             "style": "creative, data-driven, customer-centric"
+         },
+         "business_consulting": {
+             "title": "Senior Management Consultant",
+             "expertise": ["Strategic Planning", "Organizational Development", "Process Improvement", "Change Management"],
+             "background": "16+ years at top-tier consulting firms (McKinsey, BCG equivalent)",
+             "style": "strategic, framework-driven, holistic thinking"
+         },
+         "cybersecurity": {
+             "title": "Chief Information Security Consultant",
+             "expertise": ["Threat Assessment", "Security Architecture", "Compliance", "Incident Response"],
+             "background": "12+ years protecting critical infrastructure across government and private sectors",
+             "style": "security-first, compliance-aware, risk mitigation focused"
+         },
+         "healthcare_technology": {
+             "title": "Healthcare Innovation Consultant",
+             "expertise": ["Health Informatics", "Telemedicine", "Medical Device Integration", "HIPAA Compliance"],
+             "background": "10+ years implementing healthcare technology solutions",
+             "style": "patient-centric, regulation-compliant, evidence-based"
+         },
+         "educational_technology": {
+             "title": "Learning Technology Strategy Consultant",
+             "expertise": ["Instructional Design", "EdTech Implementation", "Learning Analytics", "Curriculum Development"],
+             "background": "13+ years transforming educational experiences through technology",
+             "style": "learner-focused, pedagogy-driven, accessibility-minded"
+         },
+         "environmental_science": {
+             "title": "Sustainability Strategy Consultant",
+             "expertise": ["Environmental Impact Assessment", "Carbon Footprint Analysis", "Green Technology", "ESG Reporting"],
+             "background": "11+ years driving environmental initiatives for corporations",
+             "style": "sustainability-focused, data-driven, long-term thinking"
          }
+     }
+     def assign_expert_consultant(self, skill_probabilities: Dict[str, float]) -> Dict[str, Any]:
          """
+         Create ultra-expert profile combining all relevant consultants
+
+         Args:
+             skill_probabilities: Dict mapping skill categories to probability scores (0.0-1.0)
+
+         Returns:
+             Dict containing ultra-expert profile with combined expertise
+         """
+         if not skill_probabilities:
+             return self._get_default_consultant()
+
+         # Calculate weighted scores for available expert profiles
+         expert_scores = {}
+         total_weight = 0
+
+         for skill, probability in skill_probabilities.items():
+             if skill in self.EXPERT_PROFILES and probability >= 0.2:  # 20% threshold
+                 expert_scores[skill] = probability
+                 total_weight += probability
+
+         if not expert_scores:
+             return self._get_default_consultant()
+
+         # Create ultra-expert combining all relevant consultants
+         ultra_expert = self._create_ultra_expert(expert_scores, total_weight)
+
          return {
+             "assigned_consultant": ultra_expert,
+             "expertise_weights": expert_scores,
+             "total_weight": total_weight,
+             "assignment_rationale": self._generate_ultra_expert_rationale(expert_scores, total_weight)
+         }
+
+     def _get_default_consultant(self) -> Dict[str, Any]:
+         """Default consultant for general inquiries"""
+         return {
+             "assigned_consultant": {
+                 "primary_expertise": "business_consulting",
+                 "title": "Senior Management Consultant",
+                 "expertise": ["Strategic Planning", "Problem Solving", "Analysis", "Communication"],
+                 "background": "Generalist consultant with broad industry experience",
+                 "style": "balanced, analytical, comprehensive",
+                 "secondary_expertise": [],
+                 "confidence_score": 0.7
+             },
+             "expertise_weights": {"business_consulting": 0.7},
+             "total_weight": 0.7,
+             "assignment_rationale": "Default consultant assigned for general business inquiry"
          }
+     def _create_ultra_expert(self, expert_scores: Dict[str, float], total_weight: float) -> Dict[str, Any]:
+         """Create ultra-expert profile combining all relevant consultants"""
+
+         # Sort skills by probability (highest first)
+         sorted_skills = sorted(expert_scores.items(), key=lambda x: x[1], reverse=True)
+
+         # Combine all expertise areas with weights
+         combined_expertise = []
+         combined_background_elements = []
+         combined_style_elements = []
+
+         for skill, weight in sorted_skills:
+             if skill in self.EXPERT_PROFILES:
+                 profile = self.EXPERT_PROFILES[skill]
+
+                 # Weight-based contribution
+                 contribution_ratio = weight / total_weight
+
+                 # Add expertise areas with weight indicators
+                 for expertise in profile["expertise"]:
+                     weighted_expertise = f"{expertise} (Weight: {contribution_ratio:.1%})"
+                     combined_expertise.append(weighted_expertise)
+
+                 # Extract background years and combine
+                 background = profile["background"]
+                 combined_background_elements.append(f"{background} [{skill}]")
+
+                 # Combine style elements
+                 style_parts = [s.strip() for s in profile["style"].split(",")]
+                 combined_style_elements.extend(style_parts)
+
+         # Create ultra-expert title combining top skills
+         top_skills = [skill.replace("_", " ").title() for skill, _ in sorted_skills[:3]]
+         ultra_title = f"Visionary Ultra-Expert: {' + '.join(top_skills)} Integration Specialist"
+
+         # Combine backgrounds into comprehensive experience
+         total_years = sum([self._extract_years_from_background(bg) for bg in combined_background_elements])
+         ultra_background = f"{total_years}+ years combined experience across {len(sorted_skills)} domains: " + \
+                            "; ".join(combined_background_elements[:3])  # Limit for readability
+
+         # Create unified style combining all approaches
+         unique_styles = list(set(combined_style_elements))
+         ultra_style = ", ".join(unique_styles[:6])  # Top 6 style elements
+
          return {
+             "primary_expertise": "ultra_expert_integration",
+             "title": ultra_title,
+             "expertise": combined_expertise,
+             "background": ultra_background,
+             "style": ultra_style,
+             "domain_integration": sorted_skills,
+             "confidence_score": total_weight / len(sorted_skills),  # Average confidence
+             "ultra_expert": True,
+             "expertise_count": len(sorted_skills),
+             "total_experience_years": total_years
          }

+     def _extract_years_from_background(self, background: str) -> int:
+         """Extract years of experience from background string"""
+         years_match = re.search(r'(\d+)\+?\s*years?', background.lower())
+         return int(years_match.group(1)) if years_match else 10  # Default to 10 years
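As a quick, illustrative check of the year-extraction regex above (not part of the commit), the profile strings resolve like this:

    assigner = ExpertConsultantAssigner()
    assigner._extract_years_from_background("15+ years in data science across finance")    # -> 15
    assigner._extract_years_from_background("Generalist consultant, broad experience")     # -> 10 (no match, default)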

+     def _generate_ultra_expert_rationale(self, expert_scores: Dict[str, float], total_weight: float) -> str:
+         """Generate explanation for ultra-expert assignment"""
+         sorted_skills = sorted(expert_scores.items(), key=lambda x: x[1], reverse=True)
+
+         rationale_parts = [
+             f"Ultra-Expert Profile combining {len(sorted_skills)} specialized domains",
+             f"Total expertise weight: {total_weight:.2f} across integrated skill areas"
+         ]
+
+         # Add top 3 contributions
+         top_contributions = []
+         for skill, weight in sorted_skills[:3]:
+             contribution = (weight / total_weight) * 100
+             top_contributions.append(f"{skill} ({weight:.1%}, {contribution:.0f}% contribution)")
+
+         rationale_parts.append(f"Primary domains: {'; '.join(top_contributions)}")
+
+         if len(sorted_skills) > 3:
+             additional_count = len(sorted_skills) - 3
+             rationale_parts.append(f"Plus {additional_count} additional specialized areas")
+
+         return " | ".join(rationale_parts)
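A minimal sketch of how the assigner behaves end to end (the probabilities are made up; the import path assumes this repo's src/agents/synthesis_agent.py layout):

    from src.agents.synthesis_agent import ExpertConsultantAssigner

    assigner = ExpertConsultantAssigner()
    assignment = assigner.assign_expert_consultant({
        "data_analysis": 0.8,          # kept: above the 0.2 threshold
        "technical_programming": 0.5,  # kept
        "digital_marketing": 0.1,      # dropped: below threshold
    })
    consultant = assignment["assigned_consultant"]
    # "Visionary Ultra-Expert: Data Analysis + Technical Programming Integration Specialist"
    print(consultant["title"])
    print(consultant["total_experience_years"])  # 35 (15 + 20 from the two profiles)
    print(assignment["assignment_rationale"])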


+ class EnhancedSynthesisAgent:
+     """
+     Enhanced synthesis agent with expert consultant assignment
+     Compatible with existing ResponseSynthesisAgent interface
+     """
+
+     def __init__(self, llm_router, agent_id: str = "RESP_SYNTH_001"):
+         self.llm_router = llm_router
+         self.agent_id = agent_id
+         self.specialization = "Multi-source information integration and coherent response generation"
+         self.expert_assigner = ExpertConsultantAssigner()
+         self._current_user_input = None
+
+     async def execute(self, user_input: str = None, agent_outputs: List[Dict[str, Any]] = None,
+                       context: Dict[str, Any] = None, skills_result: Dict[str, Any] = None,
+                       **kwargs) -> Dict[str, Any]:
+         """
+         Execute synthesis with expert consultant assignment
+         Compatible with both old interface (agent_outputs first) and new interface (user_input first)
+
+         Args:
+             user_input: Original user question
+             agent_outputs: Results from other agents (can be first positional arg for compatibility)
+             context: Conversation context
+             skills_result: Output from skills identification agent
+
+         Returns:
+             Dict containing synthesized response and metadata
+         """
+         # Handle backward compatibility and normalize arguments
+         # Case 1: First arg is agent_outputs (old interface)
+         if isinstance(user_input, list) and agent_outputs is None:
+             agent_outputs = user_input
+             user_input = kwargs.get('user_input', '')
+             context = kwargs.get('context', context)
+             skills_result = kwargs.get('skills_result', skills_result)
+         # Case 2: All args via kwargs
+         elif user_input is None:
+             user_input = kwargs.get('user_input', '')
+             agent_outputs = kwargs.get('agent_outputs', agent_outputs)
+             context = kwargs.get('context', context)
+             skills_result = kwargs.get('skills_result', skills_result)
+
+         # Ensure user_input is a string
+         if not isinstance(user_input, str):
+             user_input = str(user_input) if user_input else ''
+
+         # Default agent_outputs to empty list
+         if agent_outputs is None:
+             agent_outputs = []
+
+         logger.info(f"{self.agent_id} synthesizing {len(agent_outputs)} agent outputs")
+         if context:
+             logger.info(f"{self.agent_id} context has {len(context.get('interactions', []))} interactions")
+
+         # STEP 1: Extract skill probabilities from skills_result
+         skill_probabilities = self._extract_skill_probabilities(skills_result)
+         logger.info(f"Extracted skill probabilities: {skill_probabilities}")
+
+         # STEP 2: Assign expert consultant based on probabilities
+         consultant_assignment = self.expert_assigner.assign_expert_consultant(skill_probabilities)
+         assigned_consultant = consultant_assignment["assigned_consultant"]
+         logger.info(f"Assigned consultant: {assigned_consultant['title']} ({assigned_consultant.get('primary_expertise', 'N/A')})")
+
+         # STEP 3: Generate expert consultant preamble
+         expert_preamble = self._generate_expert_preamble(assigned_consultant, consultant_assignment)
+
+         # STEP 4: Build synthesis prompt with expert context
+         synthesis_prompt = self._build_synthesis_prompt_with_expert(
+             user_input=user_input,
+             context=context,
+             agent_outputs=agent_outputs,
+             expert_preamble=expert_preamble,
+             assigned_consultant=assigned_consultant
+         )
+
+         logger.info(f"{self.agent_id} calling LLM for response synthesis")
+
+         # Call LLM with enhanced prompt
          try:
+             response = await self.llm_router.route_inference(
                  task_type="response_synthesis",
+                 prompt=synthesis_prompt,
+                 max_tokens=2000,
+                 temperature=0.7
              )

+             # Only use fallback if LLM actually fails (returns None, empty, or invalid)
+             if not response or not isinstance(response, str) or len(response.strip()) == 0:
+                 logger.warning(f"{self.agent_id} LLM returned empty/invalid response, using fallback")
+                 return self._get_fallback_response(user_input, agent_outputs, assigned_consultant)
+
+             clean_response = response.strip()
+             logger.info(f"{self.agent_id} received LLM response (length: {len(clean_response)})")
+
+             # Build comprehensive result compatible with existing interface
+             result = {
+                 "synthesized_response": clean_response,
+                 "draft_response": clean_response,
+                 "final_response": clean_response,  # Main response field - used by UI
+                 "assigned_consultant": assigned_consultant,
+                 "expertise_weights": consultant_assignment["expertise_weights"],
+                 "assignment_rationale": consultant_assignment["assignment_rationale"],
+                 "source_references": self._extract_source_references(agent_outputs),
+                 "coherence_score": 0.90,
+                 "improvement_opportunities": self._identify_improvements(clean_response),
+                 "synthesis_method": "expert_enhanced_llm",
+                 "agent_id": self.agent_id,
+                 "synthesis_quality_metrics": self._calculate_quality_metrics({"final_response": clean_response}),
+                 "synthesis_metadata": {
+                     "agent_outputs_count": len(agent_outputs),
+                     "context_interactions": len(context.get('interactions', [])) if context else 0,
+                     "expert_enhanced": True,
+                     "processing_timestamp": datetime.now().isoformat()
                  }
+             }
+
+             # Add intent alignment if available
+             intent_info = self._extract_intent_info(agent_outputs)
+             if intent_info:
+                 result["intent_alignment"] = self._check_intent_alignment(result, intent_info)
+
+             return result
+
+         except Exception as e:
+             logger.error(f"{self.agent_id} synthesis failed: {str(e)}", exc_info=True)
+             return self._get_fallback_response(user_input, agent_outputs, assigned_consultant)
+     def _extract_skill_probabilities(self, skills_result: Dict[str, Any]) -> Dict[str, float]:
+         """Extract skill probabilities from skills identification result"""
+         if not skills_result:
+             return {}
+
+         # Check for skill_classification structure
+         skill_classification = skills_result.get('skill_classification', {})
+         if 'skill_probabilities' in skill_classification:
+             return skill_classification['skill_probabilities']
+
+         # Check for direct skill_probabilities
+         if 'skill_probabilities' in skills_result:
+             return skills_result['skill_probabilities']
+
+         # Extract from identified_skills if structured differently
+         identified_skills = skills_result.get('identified_skills', [])
+         if isinstance(identified_skills, list):
+             probabilities = {}
+             for skill in identified_skills:
+                 if isinstance(skill, dict) and 'skill' in skill and 'probability' in skill:
+                     # Map skill name to expert profile name if needed
+                     skill_name = skill['skill']
+                     probability = skill['probability']
+                     probabilities[skill_name] = probability
+                 elif isinstance(skill, dict) and 'category' in skill:
+                     skill_name = skill['category']
+                     probability = skill.get('probability', skill.get('confidence', 0.5))
+                     probabilities[skill_name] = probability
+             return probabilities
+
+         return {}
 
383
+ def _generate_expert_preamble(self, assigned_consultant: Dict[str, Any],
384
+ consultant_assignment: Dict[str, Any]) -> str:
385
+ """Generate expert consultant preamble for LLM prompt"""
386
+
387
+ if assigned_consultant.get('ultra_expert'):
388
+ # Ultra-expert preamble
389
+ preamble = f"""You are responding as a {assigned_consultant['title']} - an unprecedented combination of industry-leading experts.
390
+
391
+ ULTRA-EXPERT PROFILE:
392
+ - Integrated Expertise: {assigned_consultant['expertise_count']} specialized domains
393
+ - Combined Experience: {assigned_consultant['total_experience_years']}+ years across multiple industries
394
+ - Integration Approach: Cross-domain synthesis with deep specialization
395
+ - Response Style: {assigned_consultant['style']}
396
+
397
+ DOMAIN INTEGRATION: {', '.join([f"{skill} ({weight:.1%})" for skill, weight in assigned_consultant['domain_integration']])}
398
+
399
+ SPECIALIZED EXPERTISE AREAS:
400
+ {chr(10).join([f"β€’ {expertise}" for expertise in assigned_consultant['expertise'][:8]])}
401
+
402
+ ASSIGNMENT RATIONALE: {consultant_assignment['assignment_rationale']}
403
+
404
+ KNOWLEDGE DEPTH REQUIREMENT:
405
+ - Provide insights equivalent to a visionary thought leader combining expertise from multiple domains
406
+ - Synthesize knowledge across {assigned_consultant['expertise_count']} specialization areas
407
+ - Apply interdisciplinary thinking and cross-domain innovation
408
+ - Leverage combined {assigned_consultant['total_experience_years']}+ years of integrated experience
409
+
410
+ ULTRA-EXPERT RESPONSE GUIDELINES:
411
+ - Draw from extensive cross-domain experience and pattern recognition
412
+ - Provide multi-perspective analysis combining different expert viewpoints
413
+ - Include interdisciplinary frameworks and innovative approaches
414
+ - Acknowledge complexity while providing actionable, synthesized recommendations
415
+ - Balance broad visionary thinking with deep domain-specific insights
416
+ - Use integrative problem-solving that spans multiple expertise areas
417
+ """
418
  else:
419
+ # Standard single expert preamble
420
+ preamble = f"""You are responding as a {assigned_consultant['title']} with the following profile:
421
+
422
+ EXPERTISE PROFILE:
423
+ - Primary Expertise: {assigned_consultant['primary_expertise']}
424
+ - Core Skills: {', '.join(assigned_consultant['expertise'])}
425
+ - Background: {assigned_consultant['background']}
426
+ - Response Style: {assigned_consultant['style']}
427
+
428
+ ASSIGNMENT RATIONALE: {consultant_assignment['assignment_rationale']}
429
+
430
+ EXPERTISE WEIGHTS: {', '.join([f"{skill}: {weight:.1%}" for skill, weight in consultant_assignment['expertise_weights'].items()])}
431
+
432
+ """
433
+
434
+ if assigned_consultant.get('secondary_expertise'):
435
+ preamble += f"SECONDARY EXPERTISE: {', '.join(assigned_consultant['secondary_expertise'])}\n"
436
+
437
+ preamble += f"""
438
+ KNOWLEDGE DEPTH REQUIREMENT: Provide insights equivalent to a highly experienced, industry-leading {assigned_consultant['title']} with deep domain expertise and practical experience.
439
+
440
+ RESPONSE GUIDELINES:
441
+ - Draw from extensive practical experience in your field
442
+ - Provide industry-specific insights and best practices
443
+ - Include relevant frameworks, methodologies, or tools
444
+ - Acknowledge complexity while remaining actionable
445
+ - Balance theoretical knowledge with real-world application
446
+ """
447
 
448
+ return preamble
449
 
450
+     def _build_synthesis_prompt_with_expert(self, user_input: str, context: Dict[str, Any],
+                                             agent_outputs: List[Dict[str, Any]],
+                                             expert_preamble: str,
+                                             assigned_consultant: Dict[str, Any]) -> str:
+         """Build synthesis prompt with expert consultant context"""
+
+         # Build context section with summarization for long conversations
+         context_section = self._build_context_section(context)
+
+         # Build agent outputs section if any
+         agent_outputs_section = ""
+         if agent_outputs:
+             agent_outputs_section = f"\n\nAgent Analysis Results:\n"
+             for i, output in enumerate(agent_outputs, 1):
+                 agent_outputs_section += f"Agent {i}: {output.get('result', output.get('final_response', str(output)))}\n"
+
+         # Construct full prompt
+         prompt = f"""{expert_preamble}
+
+ User Question: {user_input}
+
+ {context_section}{agent_outputs_section}
+
+ Instructions: Provide a comprehensive, helpful response that directly addresses the question from your expert perspective. If there's conversation context, use it to answer the current question appropriately. Be detailed, informative, and leverage your specialized expertise in {assigned_consultant.get('primary_expertise', 'general consulting')}.
+
+ Response:"""
+
+         return prompt
+     def _build_context_section(self, context: Dict[str, Any]) -> str:
+         """Build context section with summarization for long conversations"""
+         if not context:
+             return ""
+
+         interactions = context.get('interactions', [])
+
+         if len(interactions) <= 8:
+             # Show all interactions for short conversations
+             context_section = "\n\nPrevious conversation:\n"
+             for i, interaction in enumerate(interactions, 1):
+                 context_section += f"Q{i}: {interaction.get('user_input', '')}\n"
+                 if interaction.get('assistant_response') or interaction.get('response'):
+                     context_section += f"A{i}: {interaction.get('assistant_response') or interaction.get('response', '')}\n\n"
          else:
+             # Summarize older interactions, show recent ones in full
+             recent_interactions = interactions[-8:]  # Last 8 interactions
+             older_interactions = interactions[:-8]  # Everything before last 8
+
+             # Create summary of older interactions
+             summary = self._summarize_interactions(older_interactions)
+
+             context_section = f"\n\nConversation Summary (earlier context):\n{summary}\n\nRecent conversation details:\n"
+
+             for i, interaction in enumerate(recent_interactions, 1):
+                 context_section += f"Q{i}: {interaction.get('user_input', '')}\n"
+                 if interaction.get('assistant_response') or interaction.get('response'):
+                     context_section += f"A{i}: {interaction.get('assistant_response') or interaction.get('response', '')}\n\n"
+
+         return context_section
+     def _summarize_interactions(self, interactions: List[Dict[str, Any]]) -> str:
+         """Summarize older interactions to preserve key context"""
+         if not interactions:
+             return "No prior context."
+
+         # Extract key topics and themes
+         topics = []
+         key_points = []
+
+         for interaction in interactions:
+             user_input = interaction.get('user_input', '')
+             assistant_response = interaction.get('assistant_response') or interaction.get('response', '')
+
+             # Extract topics (simple keyword extraction)
+             if user_input:
+                 # Simple topic extraction - could be enhanced with NLP
+                 user_words = user_input.lower().split()
+                 key_terms = [word for word in user_words if len(word) > 4][:3]
+                 topics.extend(key_terms)
+
+             # Extract key points from responses (first sentence)
+             if assistant_response:
+                 first_sentence = assistant_response.split('.')[0][:100]
+                 if first_sentence:
+                     key_points.append(first_sentence + "...")
+
+         # Build summary
+         unique_topics = list(set(topics))[:5]  # Top 5 unique topics
+         recent_points = key_points[-3:]  # Last 3 key points
+
+         summary = f"Topics discussed: {', '.join(unique_topics)}\n"
+         summary += f"Key points: {' '.join(recent_points)}"
+
+         return summary
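A small illustration of the heuristic on two made-up turns (topic order can vary since set() is unordered; llm_router is unused by this method, so None is safe here):

    agent = EnhancedSynthesisAgent(llm_router=None)
    print(agent._summarize_interactions([
        {"user_input": "explain gradient descent", "response": "Gradient descent minimizes a loss. It iterates..."},
        {"user_input": "show learning rate tips", "response": "Start small. Then tune..."},
    ]))
    # Topics discussed: explain, gradient, descent, learning
    # Key points: Gradient descent minimizes a loss... Start small...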

+     def _extract_intent_info(self, agent_outputs: List[Dict[str, Any]]) -> Dict[str, Any]:
+         """Extract intent information from agent outputs"""
+         for output in agent_outputs:
+             if 'primary_intent' in output:
+                 return {
+                     'primary_intent': output['primary_intent'],
+                     'confidence': output.get('confidence_scores', {}).get(output['primary_intent'], 0.5),
+                     'source_agent': output.get('agent_id', 'unknown')
+                 }
+         return None

      def _extract_source_references(self, agent_outputs: List[Dict[str, Any]]) -> List[str]:
          """Extract source references from agent outputs"""

              sources.append(agent_id)
          return list(set(sources))  # Remove duplicates

      def _calculate_quality_metrics(self, synthesis_result: Dict[str, Any]) -> Dict[str, Any]:
          """Calculate quality metrics for synthesis"""
          response = synthesis_result.get('final_response', '')

          return {
              "length": len(response),
+             "word_count": len(response.split()) if response else 0,
              "coherence_score": synthesis_result.get('coherence_score', 0.7),
              "source_count": len(synthesis_result.get('source_references', [])),
+             "has_structured_elements": bool(re.search(r'[•\d+\.]', response)) if response else False
          }

      def _check_intent_alignment(self, synthesis_result: Dict[str, Any], intent_info: Dict[str, Any]) -> Dict[str, Any]:
          """Check if synthesis aligns with detected intent"""
+         # Calculate alignment based on intent confidence and response quality
+         intent_confidence = intent_info.get('confidence', 0.5)
+         coherence_score = synthesis_result.get('coherence_score', 0.7)
+         # Alignment is average of intent confidence and coherence
+         alignment_score = (intent_confidence + coherence_score) / 2.0

          return {
              "intent_detected": intent_info.get('primary_intent'),

          return improvements

+     def _get_fallback_response(self, user_input: str, agent_outputs: List[Dict[str, Any]],
+                                assigned_consultant: Dict[str, Any]) -> Dict[str, Any]:
+         """Provide fallback response when synthesis fails (LLM API failure only)"""
+         # Only use fallback when LLM API actually fails - not as default
+         if user_input:
+             fallback_text = f"Thank you for your question: '{user_input}'. I'm processing your request and will provide a detailed response shortly."
+         else:
+             fallback_text = "I apologize, but I encountered an issue processing your request. Please try again."
+
+         return {
+             "synthesized_response": fallback_text,
+             "draft_response": fallback_text,
+             "final_response": fallback_text,
+             "assigned_consultant": assigned_consultant,
+             "source_references": self._extract_source_references(agent_outputs),
+             "coherence_score": 0.5,
+             "improvement_opportunities": ["LLM API error - fallback activated"],
+             "synthesis_method": "expert_enhanced_fallback",
+             "agent_id": self.agent_id,
+             "synthesis_quality_metrics": self._calculate_quality_metrics({"final_response": fallback_text}),
+             "error": True,
+             "synthesis_metadata": {"expert_enhanced": True, "error": True, "llm_api_failed": True}
+         }

+
+ # Backward compatibility: ResponseSynthesisAgent is now EnhancedSynthesisAgent
+ ResponseSynthesisAgent = EnhancedSynthesisAgent
+
+
+ # Factory function for compatibility
+ def create_synthesis_agent(llm_router) -> EnhancedSynthesisAgent:
+     """Factory function to create enhanced synthesis agent"""
+     return EnhancedSynthesisAgent(llm_router)
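A hedged end-to-end sketch of the new flow with a stand-in router (the stub and inputs are illustrative, and it assumes the helpers this diff leaves unchanged, such as _identify_improvements and _extract_source_references, behave as before):

    import asyncio
    from src.agents.synthesis_agent import create_synthesis_agent

    class StubRouter:
        # Hypothetical stand-in matching how execute() calls route_inference
        async def route_inference(self, task_type, prompt, max_tokens, temperature):
            return f"[{task_type}] stub answer"

    agent = create_synthesis_agent(StubRouter())
    result = asyncio.run(agent.execute(
        user_input="How should we secure our data pipeline?",
        agent_outputs=[],
        skills_result={"skill_probabilities": {"cybersecurity": 0.7, "data_analysis": 0.4}},
    ))
    print(result["assigned_consultant"]["title"])
    print(result["final_response"])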
src/orchestrator_engine.py CHANGED
@@ -160,7 +160,8 @@ class MVPOrchestrator:
          final_response = await self.agents['response_synthesis'].execute(
              agent_outputs=agent_results,
              user_input=user_input,
-             context=context
+             context=context,
+             skills_result=skills_result
          )
          self.execution_trace[-1].update({
              "status": "completed",