Commit
·
f2ae1a5
1 Parent(s):
ffe0724
Fix Gemini model name for API compatibility
Browse files
- Use models/gemini-2.0-flash-001 as the correct model path
- Simplify to generate_content instead of chat API
- Include conversation history in prompt context
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
app.py
CHANGED
|
@@ -844,20 +844,21 @@ def chat_with_gemini(
|
|
| 844 |
|
| 845 |
try:
|
| 846 |
genai.configure(api_key=api_key)
|
| 847 |
-
model = genai.GenerativeModel('gemini-1.5-flash')
|
| 848 |
|
| 849 |
-
#
|
| 850 |
-
|
| 851 |
-
for msg in history:
|
| 852 |
-
role = "user" if msg.get("role") == "user" else "model"
|
| 853 |
-
chat_history.append({"role": role, "parts": [msg.get("content", "")]})
|
| 854 |
|
| 855 |
-
#
|
| 856 |
-
|
|
|
|
|
|
|
|
|
|
| 857 |
|
| 858 |
-
|
| 859 |
-
|
| 860 |
-
|
|
|
|
|
|
|
| 861 |
|
| 862 |
assistant_message = response.text
|
| 863 |
|
|
|
|
| 844 |
|
| 845 |
try:
|
| 846 |
genai.configure(api_key=api_key)
|
|
|
|
| 847 |
|
| 848 |
+
# Use models/gemini-2.0-flash-001 which is the correct model path
|
| 849 |
+
model = genai.GenerativeModel('models/gemini-2.0-flash-001')
|
|
|
|
|
|
|
|
|
|
| 850 |
|
| 851 |
+
# Build context from history
|
| 852 |
+
context_parts = [LINEAGE_AGENT_PROMPT, "\n\nConversation history:"]
|
| 853 |
+
for msg in history[-6:]: # Keep last 6 messages for context
|
| 854 |
+
role = "User" if msg.get("role") == "user" else "Assistant"
|
| 855 |
+
context_parts.append(f"{role}: {msg.get('content', '')}")
|
| 856 |
|
| 857 |
+
context_parts.append(f"\nUser: {message}\nAssistant:")
|
| 858 |
+
|
| 859 |
+
# Generate response
|
| 860 |
+
full_prompt = "\n".join(context_parts)
|
| 861 |
+
response = model.generate_content(full_prompt)
|
| 862 |
|
| 863 |
assistant_message = response.text
|
| 864 |
|