Speedofmastery committed on
Commit
cdcafca
·
verified ·
1 Parent(s): 250bd0e

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +10 -40
app.py CHANGED
@@ -468,26 +468,16 @@ def use_ai_model(model_name, input_text, user_session="guest"):
468
  try:
469
  # Use HuggingFace Inference API for REAL AI responses
470
  if category in ["image_gen", "image_edit"]:
471
- response = f"🎨 {model_name} is generating your image...
472
-
473
- "
474
- response += f"πŸ“Έ Prompt: '{input_text}'
475
-
476
- "
477
  response += f"ℹ️ Image generation models require special handling. "
478
- response += f"The model '{model_name}' will create an image based on your prompt.
479
-
480
- "
481
  response += f"πŸ’‘ To view the generated image, use the Image Generation interface."
482
  return response
483
 
484
  elif category == "audio":
485
- response = f"🎡 {model_name} audio processing...
486
-
487
- "
488
- response += f"Input: '{input_text}'
489
-
490
- "
491
  response += f"ℹ️ Audio models require audio file input or special parameters. "
492
  response += f"Please use the Audio Processing interface for full functionality."
493
  return response
@@ -518,39 +508,19 @@ def use_ai_model(model_name, input_text, user_session="guest"):
518
  icons = {"software_engineer": "πŸ’»", "education": "πŸŽ“", "multimodal": "πŸ€–", "text": "🧠"}
519
  icon = icons.get(category, "✨")
520
 
521
- return f"{icon} **{model_name}**
522
-
523
- {full_response}"
524
 
525
  except Exception as e:
526
  error_msg = str(e)
527
  if "404" in error_msg or "not found" in error_msg.lower():
528
- return f"⚠️ Model '{model_name}' is not available via Inference API.
529
-
530
- Try using a popular model like:
531
- - Qwen/Qwen2.5-72B-Instruct
532
- - meta-llama/Llama-3.3-70B-Instruct
533
- - mistralai/Mistral-7B-Instruct-v0.3"
534
  elif "rate limit" in error_msg.lower():
535
- return f"⏱️ Rate limit reached. Please:
536
- 1. Wait a moment and try again
537
- 2. Add your HF_TOKEN in Space settings for higher limits
538
- 3. Use a different model
539
-
540
- Error: {error_msg}"
541
  else:
542
- return f"❌ Error calling {model_name}:
543
- {error_msg}
544
-
545
- Try:
546
- 1. Check if model name is correct
547
- 2. Try a different model
548
- 3. Add HF_TOKEN for authentication"
549
 
550
  except Exception as e:
551
- return f"❌ Unexpected error: {str(e)}
552
-
553
- Please try again or use a different model."
554
 
555
 
556
  def get_cloudflare_status():
 
468
  try:
469
  # Use HuggingFace Inference API for REAL AI responses
470
  if category in ["image_gen", "image_edit"]:
471
+ response = f"🎨 {model_name} is generating your image...\n\n"
472
+ response += f"πŸ“Έ Prompt: '{input_text}'\n\n"
 
 
 
 
473
  response += f"ℹ️ Image generation models require special handling. "
474
+ response += f"The model '{model_name}' will create an image based on your prompt.\n\n"
 
 
475
  response += f"πŸ’‘ To view the generated image, use the Image Generation interface."
476
  return response
477
 
478
  elif category == "audio":
479
+ response = f"🎡 {model_name} audio processing...\n\n"
480
+ response += f"Input: '{input_text}'\n\n"
 
 
 
 
481
  response += f"ℹ️ Audio models require audio file input or special parameters. "
482
  response += f"Please use the Audio Processing interface for full functionality."
483
  return response
 
508
  icons = {"software_engineer": "πŸ’»", "education": "πŸŽ“", "multimodal": "πŸ€–", "text": "🧠"}
509
  icon = icons.get(category, "✨")
510
 
511
+ return f"{icon} **{model_name}**\n\n{full_response}"
 
 
512
 
513
  except Exception as e:
514
  error_msg = str(e)
515
  if "404" in error_msg or "not found" in error_msg.lower():
516
+ return f"⚠️ Model '{model_name}' is not available via Inference API.\n\nTry using a popular model like:\n- Qwen/Qwen2.5-72B-Instruct\n- meta-llama/Llama-3.3-70B-Instruct\n- mistralai/Mistral-7B-Instruct-v0.3"
 
 
 
 
 
517
  elif "rate limit" in error_msg.lower():
518
+ return f"⏱️ Rate limit reached. Please:\n1. Wait a moment and try again\n2. Add your HF_TOKEN in Space settings for higher limits\n3. Use a different model\n\nError: {error_msg}"
 
 
 
 
 
519
  else:
520
+ return f"❌ Error calling {model_name}:\n{error_msg}\n\nTry:\n1. Check if model name is correct\n2. Try a different model\n3. Add HF_TOKEN for authentication"
 
 
 
 
 
 
521
 
522
  except Exception as e:
523
+ return f"❌ Unexpected error: {str(e)}\n\nPlease try again or use a different model."
 
 
524
 
525
 
526
  def get_cloudflare_status():