Update app.py

app.py CHANGED
@@ -1,71 +1,86 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+from typing import List, Tuple
 
-
-def respond(
-    message,
-    history: list[dict[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
-):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-    messages = [{"role": "system", "content": system_message}]
-
-    messages.extend(history)
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-chatbot = gr.ChatInterface(
-    respond,
-    type="messages",
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()
+# List of 10 models (add your own)
+MODELS = [
+    "microsoft/Phi-3-mini-4k-instruct",
+    "Qwen/Qwen2.5-0.5B-Instruct",
+    "Qwen/Qwen2.5-1.5B-Instruct",
+    "google/gemma-2-2b-it",
+    "HuggingFaceH4/zephyr-7b-beta",
+    "mistralai/Mistral-7B-Instruct-v0.3",
+    "unsloth/Mistral-Nemo-Instruct-2407-12B-GGUF",  # note: GGUF-only repos are generally not served by the serverless Inference API
+    "microsoft/Phi-3.5-mini-instruct",
+    "NousResearch/Hermes-2-Theta-Llama-3.1-8B",
+    "cognitivecomputations/dolphin-2.9-llama3-8b"
+]
 
+def convert_history(history: List[Tuple[str, str]]) -> List[dict]:
+    """Converts Gradio tuple-style history [[user_msg, bot_msg], ...] into OpenAI-style messages."""
+    messages = []
+    for user_msg, bot_msg in history:
+        messages.append({"role": "user", "content": user_msg})
+        if bot_msg:  # only if a reply exists
+            messages.append({"role": "assistant", "content": bot_msg})
+    return messages
 
+def chat_response(message: str, history: List[Tuple[str, str]], model_id: str, system_prompt: str):
+    """Main function: sends the chat to the HF Inference API."""
+    try:
+        client = InferenceClient(model=model_id)
+
+        # Build the messages list
+        messages = []
+        if system_prompt.strip():  # only if the system prompt is non-empty
+            messages.append({"role": "system", "content": system_prompt})
+
+        messages.extend(convert_history(history))  # chat history
+        messages.append({"role": "user", "content": message})  # current message
+
+        # Generate the reply (max_tokens=512 for speed)
+        response = client.chat_completion(
+            messages=messages,
+            max_tokens=512,  # cap on reply length; a token is roughly 4 characters
+            temperature=0.7,  # creativity (0 = deterministic, higher = more random)
+            stream=False  # no streaming, for simplicity
+        )
+
+        return response.choices[0].message.content
+
+    except Exception as e:
+        # Error handling (rate limit, model not found)
+        return f"Error: {str(e)}. Check the model or wait it out (API rate limit)."
 
+# UI: ChatInterface with extra inputs
+with gr.Blocks(title="HF Chatbot Tester") as demo:
+    gr.Markdown("# HF Model Tester\nPick a model, optionally set a system prompt, and chat!")
+
+    # Extra inputs (rendered above the chat)
+    model_dropdown = gr.Dropdown(
+        choices=MODELS,
+        value=MODELS[0],  # Phi-3 by default
+        label="HF model (pick one or swap in your own)",
+        interactive=True
+    )
+    system_input = gr.Textbox(
+        label="System prompt (optional system message)",
+        placeholder="Example: You are a helpful assistant. Answer concisely.",
+        lines=2
+    )
+
+    # Chat
+    chat = gr.ChatInterface(
+        fn=chat_response,
+        additional_inputs=[model_dropdown, system_input],
+        title="Chat with the model",
+        description="Test prompts, jailbreaks, code. Switch the model and the chat picks it up.",
+        examples=None,  # add your own examples later
+        cache_examples=False,
+        retry_btn="🔄 Retry",  # note: retry_btn/undo_btn/clear_btn exist in Gradio 4.x
+        undo_btn="↶ Undo",    # but were removed in Gradio 5; drop these three kwargs there
+        clear_btn="🗑️ Clear"
+    )
 
 if __name__ == "__main__":
-    demo.launch()
-
+    demo.queue(max_size=10).launch(share=True, debug=True)  # share=True requests a public link (ignored on Spaces)
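A compatibility note on the handler above: the removed template passed type="messages" to gr.ChatInterface, while the new code assumes the older tuple-style history of [user, bot] pairs, which newer Gradio releases deprecate. If the Space runs Gradio 5 with type="messages", the history already arrives as OpenAI-style dicts and convert_history becomes unnecessary. A minimal sketch of that variant, assuming type="messages" is set on the ChatInterface (this commit does not set it):

# Sketch only: handler for Gradio's "messages" history format (list of
# {"role": ..., "content": ...} dicts); would replace chat_response/convert_history
# if the ChatInterface were created with type="messages".
from huggingface_hub import InferenceClient

def chat_response_messages(message: str, history: list[dict], model_id: str, system_prompt: str):
    messages = []
    if system_prompt.strip():
        messages.append({"role": "system", "content": system_prompt})
    messages.extend(history)  # history is already in OpenAI message format
    messages.append({"role": "user", "content": message})
    client = InferenceClient(model=model_id)
    response = client.chat_completion(messages=messages, max_tokens=512, temperature=0.7)
    return response.choices[0].message.content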
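The commit also drops the template's hf_token / gr.LoginButton flow, so InferenceClient is created without credentials and anonymous calls face much stricter rate limits. A common workaround is to pass a token stored as a Space secret; the HF_TOKEN name below is an assumed convention, not something this commit configures:

import os
from huggingface_hub import InferenceClient

# Assumption: an HF_TOKEN secret is set on the Space; the name is a convention,
# not part of the committed code.
client = InferenceClient(
    model="microsoft/Phi-3-mini-4k-instruct",
    token=os.environ.get("HF_TOKEN"),  # None if the secret is missing; client then behaves as before
)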