|
|
|
|
|
""" |
|
|
Integration script to add Process Flow Visualization to the Research Assistant UI |
|
|
This script modifies the existing app.py to include the process flow tab |
|
|
""" |
|
|
|
|
|
import logging
import time
import uuid
from typing import Dict, Any, List, Tuple

import gradio as gr

from process_flow_visualizer import (
    create_process_flow_tab,
    update_process_flow_visualization,
    clear_flow_history,
    export_flow_data
)
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
def integrate_process_flow_into_app():
    """
    Integrate process flow visualization into the existing app structure.

    Returns:
        A zero-argument factory that, when called, builds the
        mobile-optimized Gradio interface (including the Process Flow tab)
        and returns ``(demo, interface_components)``.
    """

    def create_mobile_optimized_interface_with_flow():
        """Create the mobile-optimized Gradio interface with Process Flow tab.

        Returns:
            tuple: ``(demo, interface_components)`` where ``demo`` is the
            ``gr.Blocks`` application and ``interface_components`` maps
            component names to their Gradio objects so callers (e.g.
            ``setup_process_flow_event_handlers``) can wire event handlers.
        """
        # Registry of every component that later event wiring may need.
        interface_components = {}

        with gr.Blocks(
            title="AI Research Assistant MVP",
            theme=gr.themes.Soft(
                primary_hue="blue",
                secondary_hue="gray",
                font=("Inter", "system-ui", "sans-serif")
            ),
            css="""
            /* Mobile-first responsive CSS */
            .mobile-container {
                max-width: 100vw;
                margin: 0 auto;
                padding: 0 12px;
            }

            /* Touch-friendly button sizing */
            .gradio-button {
                min-height: 44px !important;
                min-width: 44px !important;
                font-size: 16px !important;
            }

            /* Mobile-optimized chat interface */
            .chatbot-container {
                height: 60vh !important;
                max-height: 60vh !important;
                overflow-y: auto !important;
                -webkit-overflow-scrolling: touch !important;
            }

            /* Process Flow specific styles */
            .process-flow-container {
                font-family: 'Inter', system-ui, sans-serif;
                background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
                border-radius: 12px;
                padding: 20px;
                margin: 10px 0;
                box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
            }

            .flow-step {
                display: flex;
                align-items: center;
                margin: 15px 0;
                padding: 12px;
                background: rgba(255, 255, 255, 0.8);
                border-radius: 8px;
                border-left: 4px solid #4CAF50;
                transition: all 0.3s ease;
            }

            .flow-step:hover {
                transform: translateX(5px);
                box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
            }

            .metrics-grid {
                display: grid;
                grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
                gap: 15px;
                margin-top: 20px;
            }

            .metric-card {
                background: rgba(255, 255, 255, 0.9);
                padding: 15px;
                border-radius: 8px;
                text-align: center;
                border-top: 3px solid #3498db;
            }

            /* Mobile input enhancements */
            .textbox-input {
                font-size: 16px !important;
                min-height: 44px !important;
                padding: 12px !important;
            }

            /* Responsive grid adjustments */
            @media (max-width: 768px) {
                .gradio-row {
                    flex-direction: column !important;
                    gap: 8px !important;
                }

                .gradio-column {
                    width: 100% !important;
                }

                .chatbot-container {
                    height: 50vh !important;
                }

                .flow-step {
                    flex-direction: column;
                    text-align: center;
                }

                .metrics-grid {
                    grid-template-columns: 1fr;
                }
            }

            /* Dark mode support */
            @media (prefers-color-scheme: dark) {
                body {
                    background: #1a1a1a;
                    color: #ffffff;
                }
            }

            /* Hide scrollbars but maintain functionality */
            .chatbot-container::-webkit-scrollbar {
                width: 4px;
            }

            /* Loading states */
            .loading-indicator {
                display: flex;
                align-items: center;
                justify-content: center;
                padding: 20px;
            }

            /* Mobile menu enhancements */
            .accordion-content {
                max-height: 200px !important;
                overflow-y: auto !important;
            }
            """
        ) as demo:

            with gr.Column(elem_classes="mobile-container"):
                gr.Markdown("""
                # 🧠 Research Assistant
                *Academic AI with transparent reasoning*
                """)

                # Header row: session id display plus new-session / menu buttons.
                with gr.Row():
                    session_info = gr.Textbox(
                        label="Session ID",
                        # BUGFIX: relies on the module-level "import uuid"
                        # (previously missing, raising NameError on build).
                        value=str(uuid.uuid4())[:8],
                        max_lines=1,
                        show_label=False,
                        container=False,
                        scale=3
                    )
                    interface_components['session_info'] = session_info

                    new_session_btn = gr.Button(
                        "🔄 New",
                        size="sm",
                        variant="secondary",
                        scale=1,
                        min_width=60
                    )
                    interface_components['new_session_btn'] = new_session_btn

                    menu_toggle = gr.Button(
                        "⚙️",
                        size="sm",
                        variant="secondary",
                        scale=1,
                        min_width=60
                    )
                    interface_components['menu_toggle'] = menu_toggle

                with gr.Tabs() as main_tabs:
                    with gr.TabItem("💬 Chat", id="chat_tab"):
                        chatbot = gr.Chatbot(
                            label="",
                            show_label=False,
                            height="60vh",
                            elem_classes="chatbot-container",
                            type="messages"
                        )
                        interface_components['chatbot'] = chatbot

                        with gr.Row():
                            message_input = gr.Textbox(
                                placeholder="Ask me anything...",
                                show_label=False,
                                max_lines=3,
                                container=False,
                                scale=4,
                                autofocus=True
                            )
                            interface_components['message_input'] = message_input

                            send_btn = gr.Button(
                                "↑ Send",
                                variant="primary",
                                scale=1,
                                min_width=80
                            )
                            interface_components['send_btn'] = send_btn

                    with gr.TabItem("🔍 Details", id="details_tab"):
                        with gr.Accordion("Reasoning Chain", open=False):
                            reasoning_display = gr.JSON(
                                label="",
                                show_label=False
                            )
                            interface_components['reasoning_display'] = reasoning_display

                        with gr.Accordion("Agent Performance", open=False):
                            performance_display = gr.JSON(
                                label="",
                                show_label=False
                            )
                            interface_components['performance_display'] = performance_display

                        with gr.Accordion("Session Context", open=False):
                            context_display = gr.JSON(
                                label="",
                                show_label=False
                            )
                            interface_components['context_display'] = context_display

                    # The process-flow tab registers its own components
                    # (clear/export/share buttons, displays) into the dict.
                    process_flow_tab = create_process_flow_tab(interface_components)
                    interface_components['process_flow_tab'] = process_flow_tab

                # Registered so callers can programmatically switch tabs.
                interface_components['main_tabs'] = main_tabs

                # Hidden mobile navigation bar; toggled by the host app.
                with gr.Row(visible=False, elem_id="mobile_nav") as mobile_navigation:
                    chat_nav_btn = gr.Button("💬 Chat", variant="secondary", size="sm", min_width=0)
                    details_nav_btn = gr.Button("🔍 Details", variant="secondary", size="sm", min_width=0)
                    flow_nav_btn = gr.Button("🔄 Flow", variant="secondary", size="sm", min_width=0)
                    settings_nav_btn = gr.Button("⚙️ Settings", variant="secondary", size="sm", min_width=0)

                interface_components['mobile_navigation'] = mobile_navigation
                interface_components['flow_nav_btn'] = flow_nav_btn
                # Previously created but never registered, leaving them
                # unreachable for event wiring — now exposed (additive keys).
                interface_components['chat_nav_btn'] = chat_nav_btn
                interface_components['details_nav_btn'] = details_nav_btn
                interface_components['settings_nav_btn'] = settings_nav_btn

                # Hidden settings panel; toggled by the host app.
                with gr.Column(visible=False, elem_id="settings_panel") as settings:
                    interface_components['settings_panel'] = settings

                    with gr.Accordion("Display Options", open=True):
                        show_reasoning = gr.Checkbox(
                            label="Show reasoning chain",
                            value=True,
                            info="Display step-by-step reasoning"
                        )
                        interface_components['show_reasoning'] = show_reasoning

                        show_agent_trace = gr.Checkbox(
                            label="Show agent execution trace",
                            value=False,
                            info="Display which agents processed your request"
                        )
                        interface_components['show_agent_trace'] = show_agent_trace

                        show_process_flow = gr.Checkbox(
                            label="Show process flow visualization",
                            value=True,
                            info="Display detailed LLM inference and agent execution flow"
                        )
                        interface_components['show_process_flow'] = show_process_flow

                        compact_mode = gr.Checkbox(
                            label="Compact mode",
                            value=False,
                            info="Optimize for smaller screens"
                        )
                        interface_components['compact_mode'] = compact_mode

                    with gr.Accordion("Performance Options", open=False):
                        response_speed = gr.Radio(
                            choices=["Fast", "Balanced", "Thorough"],
                            value="Balanced",
                            label="Response Speed Preference"
                        )
                        interface_components['response_speed'] = response_speed

                        cache_enabled = gr.Checkbox(
                            label="Enable context caching",
                            value=True,
                            info="Faster responses using session memory"
                        )
                        interface_components['cache_enabled'] = cache_enabled

                    save_prefs_btn = gr.Button("Save Preferences", variant="primary")
                    interface_components['save_prefs_btn'] = save_prefs_btn

        return demo, interface_components

    return create_mobile_optimized_interface_with_flow
|
|
|
|
|
def create_enhanced_chat_handler():
    """
    Create enhanced chat handler that includes process flow visualization.

    Returns:
        An async callable suitable for wiring to the chat submit/send
        events. Its 12-item output tuple extends the base handler's six
        outputs with six process-flow outputs.
    """

    async def enhanced_chat_handler(message: str, history: List, session_id: str,
                                    show_reasoning: bool, show_agent_trace: bool,
                                    show_process_flow: bool, request: gr.Request) -> Tuple:
        """
        Enhanced chat handler with process flow visualization.

        Args:
            message: The user's chat message.
            history: Current chat history (list of role/content dicts).
            session_id: Identifier of the active session.
            show_reasoning: Accepted for interface parity; not used here
                (the base handler consumes it upstream).
            show_agent_trace: Accepted for interface parity; not used here.
            show_process_flow: When True, also compute the flow-tab updates.
            request: The Gradio request object (unused here).

        Returns:
            12-tuple: (updated_history, cleared_input, reasoning_data,
            performance_data, context_data, session_id, flow_display,
            flow_stats, performance_metrics, intent_details,
            synthesis_details, safety_details).
        """
        start_time = time.time()

        try:
            # Imported lazily to avoid a circular import: app.py imports
            # this integration module at startup.
            from app import process_message_async

            result = await process_message_async(message, history, session_id)

            updated_history, empty_string, reasoning_data, performance_data, context_data, session_id = result

            processing_time = time.time() - start_time

            # Text of the assistant's latest reply; reused by the flow
            # payloads below (empty when the history is empty).
            final_text = updated_history[-1]["content"] if updated_history else ""

            flow_updates = {}
            if show_process_flow:
                # NOTE(review): the three agent-result dicts below are
                # hard-coded placeholders (fixed scores/timings), not real
                # agent telemetry — replace once the pipeline exposes it.
                intent_result = {
                    "primary_intent": "information_request",
                    "confidence_scores": {"information_request": 0.8},
                    "secondary_intents": [],
                    "reasoning_chain": ["Step 1: Analyze user input", "Step 2: Determine intent"],
                    "context_tags": ["general"],
                    "processing_time": 0.15,
                    "agent_id": "INTENT_REC_001"
                }

                synthesis_result = {
                    "final_response": final_text,
                    "draft_response": "",
                    "source_references": ["INTENT_REC_001"],
                    "coherence_score": 0.85,
                    "synthesis_method": "llm_enhanced",
                    "intent_alignment": {"intent_detected": "information_request", "alignment_score": 0.8},
                    # Remaining time after the (mock) 0.15s intent step.
                    "processing_time": processing_time - 0.15,
                    "agent_id": "RESP_SYNTH_001"
                }

                safety_result = {
                    "original_response": final_text,
                    "safety_checked_response": final_text,
                    "warnings": [],
                    "safety_analysis": {
                        "toxicity_score": 0.1,
                        "bias_indicators": [],
                        "privacy_concerns": [],
                        "overall_safety_score": 0.9,
                        "confidence_scores": {"safety": 0.9}
                    },
                    "blocked": False,
                    "processing_time": 0.1,
                    "agent_id": "SAFETY_BIAS_001"
                }

                flow_updates = update_process_flow_visualization(
                    user_input=message,
                    intent_result=intent_result,
                    synthesis_result=synthesis_result,
                    safety_result=safety_result,
                    final_response=final_text,
                    session_id=session_id,
                    processing_time=processing_time
                )

            # Missing flow keys default to the components' "empty" values.
            return (
                updated_history,
                empty_string,
                reasoning_data,
                performance_data,
                context_data,
                session_id,
                flow_updates.get("flow_display", ""),
                flow_updates.get("flow_stats", {}),
                flow_updates.get("performance_metrics", {}),
                flow_updates.get("intent_details", {}),
                flow_updates.get("synthesis_details", {}),
                flow_updates.get("safety_details", {})
            )

        except Exception as e:
            # logger.exception records the full traceback, unlike the
            # previous logger.error(f"...") which lost it.
            logger.exception("Error in enhanced chat handler: %s", e)

            # Surface the failure in the chat itself so the user sees it.
            error_history = list(history) if history else []
            error_history.append({"role": "user", "content": message})
            error_history.append({"role": "assistant", "content": f"Error: {str(e)}"})

            return (
                error_history,
                "",
                {"error": str(e)},
                {"error": str(e)},
                {"error": str(e)},
                session_id,
                "",
                {"error": str(e)},
                {"error": str(e)},
                {},
                {},
                {}
            )

    return enhanced_chat_handler
|
|
|
|
|
def setup_process_flow_event_handlers(interface_components: Dict[str, Any]):
    """
    Setup event handlers for process flow components.

    Wires the clear/export/share buttons of the process-flow tab to their
    callbacks. Buttons absent from ``interface_components`` are skipped,
    so this works against partially-built interfaces.
    """
    # Display components refreshed when the flow history is cleared, in
    # the positional order clear_flow_history returns them.
    flow_output_keys = (
        'flow_display',
        'flow_stats',
        'performance_metrics',
        'intent_details',
        'synthesis_details',
        'safety_details',
    )

    if 'clear_flow_btn' in interface_components:
        flow_outputs = [interface_components.get(key) for key in flow_output_keys]
        interface_components['clear_flow_btn'].click(
            fn=clear_flow_history,
            outputs=flow_outputs,
        )

    if 'export_flow_btn' in interface_components:
        interface_components['export_flow_btn'].click(
            fn=export_flow_data,
            outputs=[gr.File(label="Download Flow Data")],
        )

    if 'share_flow_btn' in interface_components:
        # Placeholder action until sharing is implemented.
        interface_components['share_flow_btn'].click(
            fn=lambda: gr.Info("Flow sharing feature coming soon!"),
            outputs=[],
        )
|
|
|
|
|
|
|
|
def integrate_process_flow():
    """
    Main function to integrate process flow visualization.

    Returns:
        dict: the integration bundle — an interface factory, an enhanced
        chat-handler factory, and the event-handler setup function.
    """
    logger.info("Integrating Process Flow Visualization into Research Assistant UI")

    bundle = {}
    # Factory that builds the Gradio interface with the flow tab included.
    bundle["create_interface"] = integrate_process_flow_into_app()
    # Async chat handler that also feeds the flow visualization outputs.
    bundle["create_handler"] = create_enhanced_chat_handler()
    # Passed uncalled: the caller invokes it once components exist.
    bundle["setup_handlers"] = setup_process_flow_event_handlers
    return bundle
|
|
|
|
|
if __name__ == "__main__":
    # Build the integration bundle and describe its entry points on stdout.
    integration = integrate_process_flow()

    startup_messages = (
        "Process Flow Visualization integration ready!",
        "Available functions:",
        "- create_interface: Modified interface creation",
        "- create_handler: Enhanced chat handler",
        "- setup_handlers: Event handler setup",
    )
    for line in startup_messages:
        print(line)