import streamlit as st
from together import Together
import os
from typing import Iterator
from PIL import Image
import base64
from PyPDF2 import PdfReader

API_KEY = os.getenv("TOGETHER_API_KEY")
if not API_KEY:
    raise ValueError("API key is missing! Make sure TOGETHER_API_KEY is set in the Secrets.")
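# Note (assumption about the deployment, not stated in this file): on Hugging Face
# Spaces, values added under Settings -> Repository secrets are exposed to the app
# as environment variables, which is how os.getenv finds TOGETHER_API_KEY here.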

def get_client():
    return Together(api_key=API_KEY)

def process_file(file) -> str:
    """Extract text from an uploaded file (PDF, image, or plain text)."""
    if file is None:
        return ""
    try:
        if file.type == "application/pdf":
            text = ""
            pdf_reader = PdfReader(file)
            for page in pdf_reader.pages:
                # extract_text() can return None for pages without a text layer
                text += (page.extract_text() or "") + "\n"
            return text
        elif file.type.startswith("image/"):
            # Images are passed along as a base64-encoded string
            return base64.b64encode(file.getvalue()).decode("utf-8")
        else:
            return file.getvalue().decode("utf-8")
    except Exception as e:
        st.error(f"Error while processing file: {str(e)}")
        return ""

def generate_response(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    files=None,
) -> Iterator[str]:
    client = get_client()
    try:
        # Initialize the message list
        messages = []
        # Add the system message only if one was provided
        if system_message.strip():
            messages.append({"role": "system", "content": system_message})
        # Add the conversation history
        for user_msg, assistant_msg in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})
        # Prepare the current message plus any file contents
        current_content = message
        if files:
            file_contents = []
            for file in files:
                content = process_file(file)
                if content:
                    file_contents.append(f"File contents:\n{content}")
            if file_contents:
                current_content = current_content + "\n\n" + "\n\n".join(file_contents)
        # Add the current message
        messages.append({"role": "user", "content": current_content})
        # API request parameters
        request_params = {
            "model": "deepseek-ai/DeepSeek-R1",
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "stream": True,
        }
        # Call the API and stream the response
        try:
            stream = client.chat.completions.create(**request_params)
            for chunk in stream:
                if hasattr(chunk.choices[0].delta, "content") and chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content
        except Exception as e:
            if "rate limit" in str(e).lower():
                yield "The API rate limit has been reached. Please try again shortly."
            else:
                error_message = str(e)
                # Inspect Together.ai's error response
                if "Input validation error" in error_message:
                    yield "The input format is invalid. Please contact the system administrator."
                else:
                    yield f"An error occurred during the API call: {error_message}"
    except Exception as e:
        yield f"An error occurred: {str(e)}"

def main():
    # page_icon was garbled in the source; a generic chat emoji is assumed here
    st.set_page_config(page_title="DeepSeek Chat", page_icon="💬", layout="wide")
    if "messages" not in st.session_state:
        st.session_state.messages = []
    st.title("DeepSeek Chat")
    st.markdown("Chat with the DeepSeek AI model. You can upload files if needed.")
    with st.sidebar:
        st.header("Settings")
        system_message = st.text_area(
            "System message",
            value=(
                "You are an AI that thinks deeply. Consider problems carefully and "
                "work through a systematic reasoning process to reach the correct "
                "solution. Always answer in Korean."
            ),
            height=100,
        )
        max_tokens = st.slider("Max tokens", 1, 4096, 2048)  # token limit
        temperature = st.slider("Temperature", 0.0, 2.0, 0.7, 0.1)  # sampling temperature
        top_p = st.slider("Top-p", 0.0, 1.0, 0.7, 0.1)  # nucleus sampling range
        # accept_multiple_files=True makes this a list of files
        uploaded_files = st.file_uploader(
            "File upload (optional)",
            type=["txt", "py", "md", "pdf", "png", "jpg", "jpeg"],
            accept_multiple_files=True,
        )
    # Replay the conversation so far
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    if prompt := st.chat_input("What would you like to know?"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            full_response = ""
            # Pair past (user, assistant) turns; the just-appended prompt has no
            # assistant reply yet, so zip() drops it from the history
            history = [
                (msg["content"], next_msg["content"])
                for msg, next_msg in zip(
                    st.session_state.messages[::2], st.session_state.messages[1::2]
                )
            ]
            for response_chunk in generate_response(
                prompt,
                history,
                system_message,
                max_tokens,
                temperature,
                top_p,
                uploaded_files,
            ):
                full_response += response_chunk
                response_placeholder.markdown(full_response + "▌")
            response_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})


if __name__ == "__main__":
    main()
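
# To run locally (a sketch: the file name app.py and the package list are
# inferred from the imports above, since the Space does not pin them here):
#
#   pip install streamlit together PyPDF2 pillow
#   export TOGETHER_API_KEY="..."
#   streamlit run app.py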