JatsTheAIGen committed on
Commit
7e13c9b
·
1 Parent(s): d771375

Details enhancement V2

Browse files
Files changed (3) hide show
  1. mobile_handlers.py +17 -3
  2. orchestrator_engine.py +175 -35
  3. src/event_handlers.py +20 -1
mobile_handlers.py CHANGED
@@ -50,7 +50,7 @@ class MobileUXHandlers:
50
  # Format for mobile display
51
  formatted_response = self._format_for_mobile(
52
  result['final_response'],
53
- show_reasoning and result.get('reasoning_chain'),
54
  show_agent_trace and result.get('agent_trace')
55
  )
56
 
@@ -60,7 +60,7 @@ class MobileUXHandlers:
60
  yield {
61
  "chatbot": updated_history,
62
  "message_input": "",
63
- "reasoning_display": result.get('reasoning_chain', {}),
64
  "performance_display": result.get('performance_metrics', {})
65
  }
66
 
@@ -92,9 +92,23 @@ class MobileUXHandlers:
92
 
93
  # Add reasoning if requested
94
  if reasoning_chain:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  formatted += f"""
96
  <div class="reasoning-mobile" style="margin-top: 15px; padding: 10px; background: #f5f5f5; border-radius: 8px; font-size: 14px;">
97
- <strong>Reasoning:</strong> {reasoning_chain[:200]}...
98
  </div>
99
  """
100
 
 
50
  # Format for mobile display
51
  formatted_response = self._format_for_mobile(
52
  result['final_response'],
53
+ show_reasoning and result.get('metadata', {}).get('reasoning_chain'),
54
  show_agent_trace and result.get('agent_trace')
55
  )
56
 
 
60
  yield {
61
  "chatbot": updated_history,
62
  "message_input": "",
63
+ "reasoning_display": result.get('metadata', {}).get('reasoning_chain', {}),
64
  "performance_display": result.get('performance_metrics', {})
65
  }
66
 
 
92
 
93
  # Add reasoning if requested
94
  if reasoning_chain:
95
+ # Handle both old and new reasoning chain formats
96
+ if isinstance(reasoning_chain, dict):
97
+ # New enhanced format - extract key information
98
+ chain_of_thought = reasoning_chain.get('chain_of_thought', {})
99
+ if chain_of_thought:
100
+ first_step = list(chain_of_thought.values())[0] if chain_of_thought else {}
101
+ hypothesis = first_step.get('hypothesis', 'Processing...')
102
+ reasoning_text = f"Hypothesis: {hypothesis}"
103
+ else:
104
+ reasoning_text = "Enhanced reasoning chain available"
105
+ else:
106
+ # Old format - direct string
107
+ reasoning_text = str(reasoning_chain)[:200]
108
+
109
  formatted += f"""
110
  <div class="reasoning-mobile" style="margin-top: 15px; padding: 10px; background: #f5f5f5; border-radius: 8px; font-size: 14px;">
111
+ <strong>Reasoning:</strong> {reasoning_text}...
112
  </div>
113
  """
114
 
orchestrator_engine.py CHANGED
@@ -46,14 +46,15 @@ class MVPOrchestrator:
46
 
47
  # Add context analysis to reasoning chain
48
  reasoning_chain["chain_of_thought"]["step_1"] = {
49
- "hypothesis": "Analyzing conversation context and user history",
50
  "evidence": [
51
  f"Previous interactions: {len(context.get('interactions', []))}",
52
  f"Session duration: {self._calculate_session_duration(context)}",
53
- f"Topic continuity: {self._analyze_topic_continuity(context, user_input)}"
 
54
  ],
55
  "confidence": 0.85,
56
- "reasoning": "Context analysis provides foundation for intent recognition and response personalization"
57
  }
58
 
59
  # Step 3: Intent recognition with enhanced CoT
@@ -75,14 +76,15 @@ class MVPOrchestrator:
75
 
76
  # Add intent reasoning to chain
77
  reasoning_chain["chain_of_thought"]["step_2"] = {
78
- "hypothesis": f"User intent is '{intent_result.get('primary_intent', 'unknown')}'",
79
  "evidence": [
80
  f"Pattern analysis: {self._extract_pattern_evidence(user_input)}",
81
  f"Confidence scores: {intent_result.get('confidence_scores', {})}",
82
- f"Secondary intents: {intent_result.get('secondary_intents', [])}"
 
83
  ],
84
  "confidence": intent_result.get('confidence_scores', {}).get(intent_result.get('primary_intent', 'unknown'), 0.7),
85
- "reasoning": f"Intent recognition based on linguistic patterns and context analysis"
86
  }
87
 
88
  # Step 4: Agent execution planning with reasoning
@@ -91,14 +93,15 @@ class MVPOrchestrator:
91
 
92
  # Add execution planning reasoning
93
  reasoning_chain["chain_of_thought"]["step_3"] = {
94
- "hypothesis": f"Optimal agent sequence for '{intent_result.get('primary_intent', 'unknown')}' intent",
95
  "evidence": [
96
  f"Intent complexity: {self._assess_intent_complexity(intent_result)}",
97
  f"Required agents: {execution_plan.get('agents_to_execute', [])}",
98
- f"Execution strategy: {execution_plan.get('execution_order', 'sequential')}"
 
99
  ],
100
  "confidence": 0.80,
101
- "reasoning": "Agent selection based on intent requirements and system capabilities"
102
  }
103
 
104
  # Step 5: Parallel agent execution
@@ -125,14 +128,15 @@ class MVPOrchestrator:
125
 
126
  # Add synthesis reasoning
127
  reasoning_chain["chain_of_thought"]["step_4"] = {
128
- "hypothesis": f"Response synthesis using '{final_response.get('synthesis_method', 'unknown')}' method",
129
  "evidence": [
130
  f"Synthesis quality: {final_response.get('coherence_score', 0.7)}",
131
  f"Source integration: {len(final_response.get('source_references', []))} sources",
132
- f"Response length: {len(str(final_response.get('final_response', '')))} characters"
 
133
  ],
134
  "confidence": final_response.get('coherence_score', 0.7),
135
- "reasoning": "Multi-source information integration with quality optimization"
136
  }
137
 
138
  # Step 7: Safety and bias check with reasoning
@@ -153,14 +157,15 @@ class MVPOrchestrator:
153
 
154
  # Add safety reasoning
155
  reasoning_chain["chain_of_thought"]["step_5"] = {
156
- "hypothesis": "Response passes safety and bias checks",
157
  "evidence": [
158
  f"Safety score: {safety_checked.get('safety_analysis', {}).get('overall_safety_score', 0.8)}",
159
  f"Warnings generated: {len(safety_checked.get('warnings', []))}",
160
- f"Analysis method: {safety_checked.get('safety_analysis', {}).get('analysis_method', 'unknown')}"
 
161
  ],
162
  "confidence": safety_checked.get('safety_analysis', {}).get('overall_safety_score', 0.8),
163
- "reasoning": "Comprehensive safety analysis with non-blocking warning system"
164
  }
165
 
166
  # Generate alternative paths and uncertainty analysis
@@ -384,34 +389,43 @@ class MVPOrchestrator:
384
  return "Complex, multi-faceted intent"
385
 
386
  def _generate_alternative_paths(self, intent_result: dict, user_input: str) -> list:
387
- """Generate alternative reasoning paths"""
388
  primary_intent = intent_result.get('primary_intent', 'unknown')
389
  secondary_intents = intent_result.get('secondary_intents', [])
 
390
 
391
  alternative_paths = []
392
 
393
  # Add secondary intents as alternative paths
394
  for secondary_intent in secondary_intents:
395
  alternative_paths.append({
396
- "path": f"Alternative intent: {secondary_intent}",
397
- "reasoning": f"Could interpret as {secondary_intent} based on linguistic patterns",
398
  "confidence": intent_result.get('confidence_scores', {}).get(secondary_intent, 0.3),
399
- "rejected_reason": f"Primary intent '{primary_intent}' has higher confidence"
400
  })
401
 
402
- # Add method-based alternatives
403
- if primary_intent == "information_request":
404
  alternative_paths.append({
405
- "path": "Technical deep-dive approach",
406
- "reasoning": "Could provide mathematical foundations and technical details",
407
  "confidence": 0.6,
408
- "rejected_reason": "User level suggests conceptual approach"
 
 
 
 
 
 
 
 
409
  })
410
 
411
  return alternative_paths
412
 
413
  def _identify_uncertainty_areas(self, intent_result: dict, final_response: dict, safety_checked: dict) -> list:
414
- """Identify areas of uncertainty in the reasoning"""
415
  uncertainty_areas = []
416
 
417
  # Intent uncertainty
@@ -419,33 +433,42 @@ class MVPOrchestrator:
419
  confidence = intent_result.get('confidence_scores', {}).get(primary_intent, 0.5)
420
  if confidence < 0.8:
421
  uncertainty_areas.append({
422
- "aspect": f"Intent classification ({primary_intent})",
423
  "confidence": confidence,
424
- "mitigation": "Provided multiple interpretation options"
425
  })
426
 
427
  # Response quality uncertainty
428
  coherence_score = final_response.get('coherence_score', 0.7)
429
  if coherence_score < 0.8:
430
  uncertainty_areas.append({
431
- "aspect": "Response coherence and structure",
432
  "confidence": coherence_score,
433
- "mitigation": "Applied quality enhancement techniques"
434
  })
435
 
436
  # Safety uncertainty
437
  safety_score = safety_checked.get('safety_analysis', {}).get('overall_safety_score', 0.8)
438
  if safety_score < 0.9:
439
  uncertainty_areas.append({
440
- "aspect": "Content safety and bias assessment",
441
  "confidence": safety_score,
442
- "mitigation": "Generated advisory warnings for user awareness"
 
 
 
 
 
 
 
 
 
443
  })
444
 
445
  return uncertainty_areas
446
 
447
  def _extract_evidence_sources(self, intent_result: dict, final_response: dict, context: dict) -> list:
448
- """Extract evidence sources for reasoning"""
449
  evidence_sources = []
450
 
451
  # Intent evidence
@@ -453,7 +476,7 @@ class MVPOrchestrator:
453
  "type": "linguistic_analysis",
454
  "source": "Pattern matching and NLP analysis",
455
  "relevance": 0.9,
456
- "description": "Intent classification based on linguistic patterns"
457
  })
458
 
459
  # Context evidence
@@ -463,7 +486,7 @@ class MVPOrchestrator:
463
  "type": "conversation_history",
464
  "source": f"Previous {len(interactions)} interactions",
465
  "relevance": 0.7,
466
- "description": "Conversation context and topic continuity"
467
  })
468
 
469
  # Synthesis evidence
@@ -472,9 +495,19 @@ class MVPOrchestrator:
472
  "type": "synthesis_method",
473
  "source": f"{synthesis_method} approach",
474
  "relevance": 0.8,
475
- "description": f"Response generated using {synthesis_method} methodology"
476
  })
477
 
 
 
 
 
 
 
 
 
 
 
478
  return evidence_sources
479
 
480
  def _calibrate_confidence_scores(self, reasoning_chain: dict) -> dict:
@@ -499,4 +532,111 @@ class MVPOrchestrator:
499
  },
500
  "calibration_method": "Weighted average of step confidences"
501
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
502
 
 
46
 
47
  # Add context analysis to reasoning chain
48
  reasoning_chain["chain_of_thought"]["step_1"] = {
49
+ "hypothesis": f"User is asking about: '{self._extract_main_topic(user_input)}'",
50
  "evidence": [
51
  f"Previous interactions: {len(context.get('interactions', []))}",
52
  f"Session duration: {self._calculate_session_duration(context)}",
53
+ f"Topic continuity: {self._analyze_topic_continuity(context, user_input)}",
54
+ f"Query keywords: {self._extract_keywords(user_input)}"
55
  ],
56
  "confidence": 0.85,
57
+ "reasoning": f"Context analysis shows user is focused on {self._extract_main_topic(user_input)} with {len(context.get('interactions', []))} previous interactions"
58
  }
59
 
60
  # Step 3: Intent recognition with enhanced CoT
 
76
 
77
  # Add intent reasoning to chain
78
  reasoning_chain["chain_of_thought"]["step_2"] = {
79
+ "hypothesis": f"User intent is '{intent_result.get('primary_intent', 'unknown')}' for topic '{self._extract_main_topic(user_input)}'",
80
  "evidence": [
81
  f"Pattern analysis: {self._extract_pattern_evidence(user_input)}",
82
  f"Confidence scores: {intent_result.get('confidence_scores', {})}",
83
+ f"Secondary intents: {intent_result.get('secondary_intents', [])}",
84
+ f"Query complexity: {self._assess_query_complexity(user_input)}"
85
  ],
86
  "confidence": intent_result.get('confidence_scores', {}).get(intent_result.get('primary_intent', 'unknown'), 0.7),
87
+ "reasoning": f"Intent '{intent_result.get('primary_intent', 'unknown')}' detected for {self._extract_main_topic(user_input)} based on linguistic patterns and context"
88
  }
89
 
90
  # Step 4: Agent execution planning with reasoning
 
93
 
94
  # Add execution planning reasoning
95
  reasoning_chain["chain_of_thought"]["step_3"] = {
96
+ "hypothesis": f"Optimal approach for '{intent_result.get('primary_intent', 'unknown')}' intent on '{self._extract_main_topic(user_input)}'",
97
  "evidence": [
98
  f"Intent complexity: {self._assess_intent_complexity(intent_result)}",
99
  f"Required agents: {execution_plan.get('agents_to_execute', [])}",
100
+ f"Execution strategy: {execution_plan.get('execution_order', 'sequential')}",
101
+ f"Response scope: {self._determine_response_scope(user_input)}"
102
  ],
103
  "confidence": 0.80,
104
+ "reasoning": f"Agent selection optimized for {intent_result.get('primary_intent', 'unknown')} intent regarding {self._extract_main_topic(user_input)}"
105
  }
106
 
107
  # Step 5: Parallel agent execution
 
128
 
129
  # Add synthesis reasoning
130
  reasoning_chain["chain_of_thought"]["step_4"] = {
131
+ "hypothesis": f"Response synthesis for '{self._extract_main_topic(user_input)}' using '{final_response.get('synthesis_method', 'unknown')}' method",
132
  "evidence": [
133
  f"Synthesis quality: {final_response.get('coherence_score', 0.7)}",
134
  f"Source integration: {len(final_response.get('source_references', []))} sources",
135
+ f"Response length: {len(str(final_response.get('final_response', '')))} characters",
136
+ f"Content relevance: {self._assess_content_relevance(user_input, final_response)}"
137
  ],
138
  "confidence": final_response.get('coherence_score', 0.7),
139
+ "reasoning": f"Multi-source synthesis for {self._extract_main_topic(user_input)} using {final_response.get('synthesis_method', 'unknown')} approach"
140
  }
141
 
142
  # Step 7: Safety and bias check with reasoning
 
157
 
158
  # Add safety reasoning
159
  reasoning_chain["chain_of_thought"]["step_5"] = {
160
+ "hypothesis": f"Safety validation for response about '{self._extract_main_topic(user_input)}'",
161
  "evidence": [
162
  f"Safety score: {safety_checked.get('safety_analysis', {}).get('overall_safety_score', 0.8)}",
163
  f"Warnings generated: {len(safety_checked.get('warnings', []))}",
164
+ f"Analysis method: {safety_checked.get('safety_analysis', {}).get('analysis_method', 'unknown')}",
165
+ f"Content appropriateness: {self._assess_content_appropriateness(user_input, safety_checked)}"
166
  ],
167
  "confidence": safety_checked.get('safety_analysis', {}).get('overall_safety_score', 0.8),
168
+ "reasoning": f"Safety analysis for {self._extract_main_topic(user_input)} content with non-blocking warning system"
169
  }
170
 
171
  # Generate alternative paths and uncertainty analysis
 
389
  return "Complex, multi-faceted intent"
390
 
391
  def _generate_alternative_paths(self, intent_result: dict, user_input: str) -> list:
392
+ """Generate alternative reasoning paths based on actual content"""
393
  primary_intent = intent_result.get('primary_intent', 'unknown')
394
  secondary_intents = intent_result.get('secondary_intents', [])
395
+ main_topic = self._extract_main_topic(user_input)
396
 
397
  alternative_paths = []
398
 
399
  # Add secondary intents as alternative paths
400
  for secondary_intent in secondary_intents:
401
  alternative_paths.append({
402
+ "path": f"Alternative intent: {secondary_intent} for {main_topic}",
403
+ "reasoning": f"Could interpret as {secondary_intent} based on linguistic patterns in the query about {main_topic}",
404
  "confidence": intent_result.get('confidence_scores', {}).get(secondary_intent, 0.3),
405
+ "rejected_reason": f"Primary intent '{primary_intent}' has higher confidence for {main_topic} topic"
406
  })
407
 
408
+ # Add method-based alternatives based on content
409
+ if 'curriculum' in user_input.lower() or 'course' in user_input.lower():
410
  alternative_paths.append({
411
+ "path": "Structured educational framework approach",
412
+ "reasoning": f"Could provide a more structured educational framework for {main_topic}",
413
  "confidence": 0.6,
414
+ "rejected_reason": f"Current approach better matches user's specific request for {main_topic}"
415
+ })
416
+
417
+ if 'detailed' in user_input.lower() or 'comprehensive' in user_input.lower():
418
+ alternative_paths.append({
419
+ "path": "High-level overview approach",
420
+ "reasoning": f"Could provide a high-level overview instead of detailed content for {main_topic}",
421
+ "confidence": 0.4,
422
+ "rejected_reason": f"User specifically requested detailed information about {main_topic}"
423
  })
424
 
425
  return alternative_paths
426
 
427
  def _identify_uncertainty_areas(self, intent_result: dict, final_response: dict, safety_checked: dict) -> list:
428
+ """Identify areas of uncertainty in the reasoning based on actual content"""
429
  uncertainty_areas = []
430
 
431
  # Intent uncertainty
 
433
  confidence = intent_result.get('confidence_scores', {}).get(primary_intent, 0.5)
434
  if confidence < 0.8:
435
  uncertainty_areas.append({
436
+ "aspect": f"Intent classification ({primary_intent}) for user's specific request",
437
  "confidence": confidence,
438
+ "mitigation": "Provided multiple interpretation options and context-aware analysis"
439
  })
440
 
441
  # Response quality uncertainty
442
  coherence_score = final_response.get('coherence_score', 0.7)
443
  if coherence_score < 0.8:
444
  uncertainty_areas.append({
445
+ "aspect": "Response coherence and structure for the specific topic",
446
  "confidence": coherence_score,
447
+ "mitigation": "Applied quality enhancement techniques and content relevance checks"
448
  })
449
 
450
  # Safety uncertainty
451
  safety_score = safety_checked.get('safety_analysis', {}).get('overall_safety_score', 0.8)
452
  if safety_score < 0.9:
453
  uncertainty_areas.append({
454
+ "aspect": "Content safety and bias assessment for educational content",
455
  "confidence": safety_score,
456
+ "mitigation": "Generated advisory warnings for user awareness and content appropriateness"
457
+ })
458
+
459
+ # Content relevance uncertainty
460
+ response_text = str(final_response.get('final_response', ''))
461
+ if len(response_text) < 100: # Very short response
462
+ uncertainty_areas.append({
463
+ "aspect": "Response completeness for user's detailed request",
464
+ "confidence": 0.6,
465
+ "mitigation": "Enhanced response generation with topic-specific content"
466
  })
467
 
468
  return uncertainty_areas
469
 
470
  def _extract_evidence_sources(self, intent_result: dict, final_response: dict, context: dict) -> list:
471
+ """Extract evidence sources for reasoning based on actual content"""
472
  evidence_sources = []
473
 
474
  # Intent evidence
 
476
  "type": "linguistic_analysis",
477
  "source": "Pattern matching and NLP analysis",
478
  "relevance": 0.9,
479
+ "description": f"Intent classification based on linguistic patterns for '{intent_result.get('primary_intent', 'unknown')}' intent"
480
  })
481
 
482
  # Context evidence
 
486
  "type": "conversation_history",
487
  "source": f"Previous {len(interactions)} interactions",
488
  "relevance": 0.7,
489
+ "description": f"Conversation context and topic continuity analysis"
490
  })
491
 
492
  # Synthesis evidence
 
495
  "type": "synthesis_method",
496
  "source": f"{synthesis_method} approach",
497
  "relevance": 0.8,
498
+ "description": f"Response generated using {synthesis_method} methodology with quality optimization"
499
  })
500
 
501
+ # Content-specific evidence
502
+ response_text = str(final_response.get('final_response', ''))
503
+ if len(response_text) > 1000:
504
+ evidence_sources.append({
505
+ "type": "content_analysis",
506
+ "source": "Comprehensive content generation",
507
+ "relevance": 0.85,
508
+ "description": "Detailed response generation based on user's specific requirements"
509
+ })
510
+
511
  return evidence_sources
512
 
513
  def _calibrate_confidence_scores(self, reasoning_chain: dict) -> dict:
 
532
  },
533
  "calibration_method": "Weighted average of step confidences"
534
  }
535
+
536
+ def _extract_main_topic(self, user_input: str) -> str:
537
+ """Extract the main topic from user input for context-aware reasoning"""
538
+ input_lower = user_input.lower()
539
+
540
+ # Topic extraction based on keywords
541
+ if any(word in input_lower for word in ['curriculum', 'course', 'teach', 'learning', 'education']):
542
+ if 'ai' in input_lower or 'chatbot' in input_lower or 'assistant' in input_lower:
543
+ return "AI chatbot course curriculum"
544
+ elif 'programming' in input_lower or 'python' in input_lower:
545
+ return "Programming course curriculum"
546
+ else:
547
+ return "Educational course design"
548
+
549
+ elif any(word in input_lower for word in ['machine learning', 'ml', 'neural network', 'deep learning']):
550
+ return "Machine learning concepts"
551
+
552
+ elif any(word in input_lower for word in ['ai', 'artificial intelligence', 'chatbot', 'assistant']):
553
+ return "Artificial intelligence and chatbots"
554
+
555
+ elif any(word in input_lower for word in ['data science', 'data analysis', 'analytics']):
556
+ return "Data science and analysis"
557
+
558
+ elif any(word in input_lower for word in ['programming', 'coding', 'development', 'software']):
559
+ return "Software development and programming"
560
+
561
+ else:
562
+ # Extract first few words as topic
563
+ words = user_input.split()[:4]
564
+ return " ".join(words) if words else "General inquiry"
565
+
566
+ def _extract_keywords(self, user_input: str) -> str:
567
+ """Extract key terms from user input"""
568
+ input_lower = user_input.lower()
569
+ keywords = []
570
+
571
+ # Extract important terms
572
+ important_terms = [
573
+ 'curriculum', 'course', 'teach', 'learning', 'education',
574
+ 'ai', 'artificial intelligence', 'chatbot', 'assistant',
575
+ 'machine learning', 'ml', 'neural network', 'deep learning',
576
+ 'programming', 'python', 'development', 'software',
577
+ 'data science', 'analytics', 'analysis'
578
+ ]
579
+
580
+ for term in important_terms:
581
+ if term in input_lower:
582
+ keywords.append(term)
583
+
584
+ return ", ".join(keywords[:5]) if keywords else "General terms"
585
+
586
+ def _assess_query_complexity(self, user_input: str) -> str:
587
+ """Assess the complexity of the user query"""
588
+ word_count = len(user_input.split())
589
+ question_count = user_input.count('?')
590
+
591
+ if word_count > 50 and question_count > 2:
592
+ return "Highly complex multi-part query"
593
+ elif word_count > 30 and question_count > 1:
594
+ return "Moderately complex query"
595
+ elif word_count > 15:
596
+ return "Standard complexity query"
597
+ else:
598
+ return "Simple query"
599
+
600
+ def _determine_response_scope(self, user_input: str) -> str:
601
+ """Determine the scope of response needed"""
602
+ input_lower = user_input.lower()
603
+
604
+ if any(word in input_lower for word in ['detailed', 'comprehensive', 'complete', 'full']):
605
+ return "Comprehensive detailed response"
606
+ elif any(word in input_lower for word in ['brief', 'short', 'summary', 'overview']):
607
+ return "Brief summary response"
608
+ elif any(word in input_lower for word in ['step by step', 'tutorial', 'guide', 'how to']):
609
+ return "Step-by-step instructional response"
610
+ else:
611
+ return "Standard informative response"
612
+
613
+ def _assess_content_relevance(self, user_input: str, final_response: dict) -> str:
614
+ """Assess how relevant the response content is to the user input"""
615
+ response_text = str(final_response.get('final_response', ''))
616
+
617
+ # Simple relevance check based on keyword overlap
618
+ input_words = set(user_input.lower().split())
619
+ response_words = set(response_text.lower().split())
620
+
621
+ overlap = len(input_words.intersection(response_words))
622
+ total_input_words = len(input_words)
623
+
624
+ if overlap / total_input_words > 0.3:
625
+ return "High relevance to user query"
626
+ elif overlap / total_input_words > 0.15:
627
+ return "Moderate relevance to user query"
628
+ else:
629
+ return "Low relevance to user query"
630
+
631
+ def _assess_content_appropriateness(self, user_input: str, safety_checked: dict) -> str:
632
+ """Assess content appropriateness for the topic"""
633
+ warnings = safety_checked.get('warnings', [])
634
+ safety_score = safety_checked.get('safety_analysis', {}).get('overall_safety_score', 0.8)
635
+
636
+ if safety_score > 0.9 and len(warnings) == 0:
637
+ return "Highly appropriate content"
638
+ elif safety_score > 0.8 and len(warnings) <= 1:
639
+ return "Appropriate content with minor notes"
640
+ else:
641
+ return "Content requires review"
642
 
src/event_handlers.py CHANGED
@@ -43,7 +43,26 @@ class EventHandlers:
43
  performance_data = {}
44
 
45
  if show_reasoning:
46
- reasoning_data = {"reasoning": "Mock reasoning chain for demonstration"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
  if show_agent_trace:
49
  performance_data = {"agents_used": ["intent", "synthesis", "safety"]}
 
43
  performance_data = {}
44
 
45
  if show_reasoning:
46
+ reasoning_data = {
47
+ "chain_of_thought": {
48
+ "step_1": {
49
+ "hypothesis": "Mock reasoning for demonstration",
50
+ "evidence": ["Mock mode active", f"User input: {message[:50]}..."],
51
+ "confidence": 0.5,
52
+ "reasoning": "Demonstration mode - enhanced reasoning chain not available"
53
+ }
54
+ },
55
+ "alternative_paths": [],
56
+ "uncertainty_areas": [
57
+ {
58
+ "aspect": "System mode",
59
+ "confidence": 0.5,
60
+ "mitigation": "Mock mode - full reasoning chain not available"
61
+ }
62
+ ],
63
+ "evidence_sources": [],
64
+ "confidence_calibration": {"overall_confidence": 0.5, "mock_mode": True}
65
+ }
66
 
67
  if show_agent_trace:
68
  performance_data = {"agents_used": ["intent", "synthesis", "safety"]}