# mobile_handlers.py
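"""
Mobile-aware UX handlers for the Research AI Assistant Gradio app: detect
mobile clients from the request user agent, stream mobile-optimized updates,
and fall back to a simpler single-shot path for desktop clients.
"""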
import gradio as gr
class MobileUXHandlers:
def __init__(self, orchestrator):
self.orchestrator = orchestrator
self.mobile_state = {}
async def handle_mobile_submit(self, message, chat_history, session_id,
show_reasoning, show_agent_trace, request: gr.Request):
"""
Mobile-optimized submission handler with enhanced UX
"""
# Get mobile device info
user_agent = request.headers.get("user-agent", "").lower()
is_mobile = any(device in user_agent for device in ['mobile', 'android', 'iphone'])
        # Mobile-specific optimizations: stream updates from the mobile
        # generator, or yield the single desktop result.
        if is_mobile:
            async for update in self._mobile_optimized_processing(
                message, chat_history, session_id, show_reasoning, show_agent_trace
            ):
                yield update
        else:
            yield await self._desktop_processing(
                message, chat_history, session_id, show_reasoning, show_agent_trace
            )
async def _mobile_optimized_processing(self, message, chat_history, session_id,
show_reasoning, show_agent_trace):
"""
Mobile-specific processing with enhanced UX feedback
"""
try:
# Immediate feedback for mobile users
yield {
"chatbot": chat_history + [[message, "Thinking..."]],
"message_input": "",
"reasoning_display": {"status": "processing"},
"performance_display": {"status": "processing"}
}
# Process with mobile-optimized parameters
result = await self.orchestrator.process_request(
session_id=session_id,
user_input=message,
mobile_optimized=True, # Special flag for mobile
max_tokens=800 # Shorter responses for mobile
)
# Format for mobile display
formatted_response = self._format_for_mobile(
result['final_response'],
show_reasoning and result.get('reasoning_chain'),
show_agent_trace and result.get('agent_trace')
)
# Update chat history
updated_history = chat_history + [[message, formatted_response]]
yield {
"chatbot": updated_history,
"message_input": "",
"reasoning_display": result.get('reasoning_chain', {}),
"performance_display": result.get('performance_metrics', {})
}
except Exception as e:
# Mobile-friendly error handling
error_response = self._get_mobile_friendly_error(e)
yield {
"chatbot": chat_history + [[message, error_response]],
"message_input": message, # Keep message for retry
"reasoning_display": {"error": "Processing failed"},
"performance_display": {"error": str(e)}
}
def _format_for_mobile(self, response, reasoning_chain, agent_trace):
"""
Format response for optimal mobile readability
"""
# Split long responses for mobile
if len(response) > 400:
paragraphs = self._split_into_paragraphs(response, max_length=300)
response = "\n\n".join(paragraphs)
# Add mobile-optimized formatting
formatted = f"""
<div class="mobile-response">
{response}
</div>
"""
# Add reasoning if requested
if reasoning_chain:
formatted += f"""
<div class="reasoning-mobile" style="margin-top: 15px; padding: 10px; background: #f5f5f5; border-radius: 8px; font-size: 14px;">
                <strong>Reasoning:</strong> {str(reasoning_chain)[:200]}...
</div>
"""
return formatted
def _get_mobile_friendly_error(self, error):
"""
User-friendly error messages for mobile
"""
error_messages = {
"timeout": "⏱️ Taking longer than expected. Please try a simpler question.",
"network": "📡 Connection issue. Check your internet and try again.",
"rate_limit": "🚦 Too many requests. Please wait a moment.",
"default": "❌ Something went wrong. Please try again."
}
error_type = "default"
if "timeout" in str(error).lower():
error_type = "timeout"
elif "network" in str(error).lower() or "connection" in str(error).lower():
error_type = "network"
elif "rate" in str(error).lower():
error_type = "rate_limit"
return error_messages[error_type]
    async def _desktop_processing(self, message, chat_history, session_id,
                                  show_reasoning, show_agent_trace):
        """
        Desktop processing without mobile optimizations
        """
        # Minimal pass-through (TODO: add desktop-specific formatting):
        # forward the request to the orchestrator without the mobile flags.
        try:
            result = await self.orchestrator.process_request(
                session_id=session_id,
                user_input=message
            )
            response = result['final_response']
        except Exception as e:
            response = self._get_mobile_friendly_error(e)
            result = {"reasoning_chain": {"error": "Processing failed"},
                      "performance_metrics": {"error": str(e)}}
        return {
            "chatbot": chat_history + [[message, response]],
            "message_input": "",
            "reasoning_display": result.get('reasoning_chain', {}) if show_reasoning else {},
            "performance_display": result.get('performance_metrics', {})
        }
def _split_into_paragraphs(self, text, max_length=300):
"""
Split text into mobile-friendly paragraphs
"""
        # Naive word-based splitting; TODO: split on sentence boundaries instead
words = text.split()
paragraphs = []
current_para = []
for word in words:
current_para.append(word)
            # Keep at least one word per chunk so no empty paragraph is emitted
            if len(' '.join(current_para)) > max_length and len(current_para) > 1:
paragraphs.append(' '.join(current_para[:-1]))
current_para = [current_para[-1]]
if current_para:
paragraphs.append(' '.join(current_para))
return paragraphs
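# ---------------------------------------------------------------------------
# Minimal wiring sketch: one way these handlers could be attached to a Gradio
# Blocks app. ``StubOrchestrator`` and the component names are hypothetical
# placeholders; the adapter assumes the pair-style chat history these handlers
# build and that the outputs are declared in the same order as the yielded keys.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    class StubOrchestrator:
        """Hypothetical orchestrator stub that simply echoes the input."""

        async def process_request(self, session_id, user_input, **kwargs):
            return {
                "final_response": f"Echo: {user_input}",
                "reasoning_chain": {},
                "performance_metrics": {},
            }

    handlers = MobileUXHandlers(StubOrchestrator())

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(value=[], label="Chat")
        message_input = gr.Textbox(label="Message")
        reasoning_display = gr.JSON(label="Reasoning")
        performance_display = gr.JSON(label="Performance")
        session_id = gr.State("demo-session")

        async def on_submit(message, history, sid, request: gr.Request):
            # Adapt the string-keyed update dicts into the positional tuple
            # Gradio expects for the declared outputs.
            async for update in handlers.handle_mobile_submit(
                message, history, sid, True, False, request
            ):
                yield (
                    update["chatbot"],
                    update["message_input"],
                    update["reasoning_display"],
                    update["performance_display"],
                )

        message_input.submit(
            on_submit,
            inputs=[message_input, chatbot, session_id],
            outputs=[chatbot, message_input, reasoning_display, performance_display],
        )

    demo.launch()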