Update app.py
app.py CHANGED

@@ -269,7 +269,7 @@ def generate_dissertation_api():
             logging.warning(f"Impossible de supprimer le fichier temporaire {temp_file_path}: {e}")
 
         # Utiliser le modèle multimodal
-        model_name = "models/gemini-
+        model_name = "models/gemini-flash-latest"
 
     else: # Type 1 et 2 (texte uniquement)
         context_str = ""
@@ -290,7 +290,7 @@ def generate_dissertation_api():
 
     final_prompt = prompt_template.format(phi_prompt=data_for_log['question'], context=context_str)
     contents = [final_prompt]
-    model_name = "models/gemini-
+    model_name = "models/gemini-flash-latest"
 
     # --- Appel à l'IA ---
    config = types.GenerateContentConfig(
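
The commit points both the multimodal branch and the text-only branch at the same model string, "models/gemini-flash-latest", which then flows into the "Appel à l'IA" step. As a rough illustration of where that value ends up, here is a minimal sketch of a google-genai call; the client construction, the API-key handling, the example prompt, and the temperature value are assumptions for illustration only, since the diff shows nothing beyond model_name, contents, and types.GenerateContentConfig.

from google import genai
from google.genai import types

# Hypothetical client setup; app.py's actual key handling is not shown in the diff.
client = genai.Client(api_key="YOUR_API_KEY")

model_name = "models/gemini-flash-latest"   # value introduced by this commit
contents = ["Exemple de prompt"]            # app.py builds this as [final_prompt]
config = types.GenerateContentConfig(
    temperature=0.7,                        # hypothetical; the real settings are not in the hunk
)

# Hypothetical call; the diff does not show how app.py actually invokes the model.
response = client.models.generate_content(
    model=model_name,
    contents=contents,
    config=config,
)
print(response.text)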