Update app.py
app.py
CHANGED
@@ -1,81 +1,64 @@
-from langchain_community.chat_message_histories import StreamlitChatMessageHistory
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain_core.runnables.history import RunnableWithMessageHistory
-from langchain_google_genai import GoogleGenerativeAI
-import os
-from dotenv import load_dotenv
-
-
-import streamlit as st
-
-
-
-api_key=os.getenv("GOOGLE_API_KEY")
-# Configure the Gemini API
-#llm = GoogleGenerativeAI(model="models/text-bison-001", google_api_key=api_key)
-#genai.configure(api_key=api_key)
-
-
-st.set_page_config(page_title="StreamlitChatMessageHistory", page_icon="📖")
-st.title("📖 StreamlitChatMessageHistory")
-
-"""
-A basic example of using StreamlitChatMessageHistory to help LLMChain remember messages in a conversation.
-The messages are stored in Session State across re-runs automatically. You can view the contents of Session State
-in the expander below. View the
-[source code for this app](https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/basic_memory.py).
-"""
-
-# Set up memory
-msgs = StreamlitChatMessageHistory(key="langchain_messages")
-if len(msgs.messages) == 0:
-    msgs.add_ai_message("How can I help you?")
-
-view_messages = st.expander("View the message contents in session state")
-
-# Set up the LangChain, passing in Message History
-prompt = ChatPromptTemplate.from_messages(
-    [
-        ("system", "You are an AI chatbot having a conversation with a human."),
-        MessagesPlaceholder(variable_name="history"),
-        ("human", "{question}"),
-    ]
-)
-
-chain = prompt | GoogleGenerativeAI(model="models/gemini-2.0-flash-exp", google_api_key=api_key)
-
-
-
-chain_with_history = RunnableWithMessageHistory(
-    chain,
-    lambda session_id: msgs,
-    input_messages_key="question",
-    history_messages_key="history",
-)
-
-# Render current messages from StreamlitChatMessageHistory
-for msg in msgs.messages:
-    st.chat_message(msg.type).write(msg.content)
-
-# If user inputs a new prompt, generate and draw a new response
-if prompt := st.chat_input():
-    st.chat_message("human").write(prompt)
-    # Note: new messages are saved to history automatically by Langchain during run
-    config = {"configurable": {"session_id": "any"}}
-    response = chain_with_history.invoke({"question": prompt}, config)
-    st.chat_message("ai").write(response.content)
-
-# Draw the messages at the end, so newly generated ones show up immediately
-with view_messages:
-    """
-    Message History initialized with:
-    ```python
-    msgs = StreamlitChatMessageHistory(key="langchain_messages")
-    ```
-
-    Contents of `st.session_state.langchain_messages`:
-    """
-    view_messages.json(st.session_state.langchain_messages)
+import streamlit as st
+
+from llama_index.llms.gemini import Gemini
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
+import os
+
+#os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
+
+st.set_page_config(page_title="Chat with the Streamlit docs, powered by LlamaIndex", page_icon="🦙", layout="centered", initial_sidebar_state="auto", menu_items=None)
+openai.api_key = st.secrets.openai_key
+st.title("Chat with the Streamlit docs, powered by LlamaIndex 💬🦙")
+st.info("Check out the full tutorial to build this app in our [blog post](https://blog.streamlit.io/build-a-chatbot-with-custom-data-sources-powered-by-llamaindex/)", icon="📃")
+
+if "messages" not in st.session_state.keys():  # Initialize the chat messages history
+    st.session_state.messages = [
+        {
+            "role": "assistant",
+            "content": "Ask me a question about Streamlit's open-source Python library!",
+        }
+    ]
+
+@st.cache_resource(show_spinner=False)
+def load_data():
+    reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
+    docs = reader.load_data()
+    Settings.llm = Gemini(
+        model="gemini-2.0-flash-exp",
+        temperature=1,
+        system_prompt="""You are an expert on
+        the Streamlit Python library and your
+        job is to answer technical questions.
+        Assume that all questions are related
+        to the Streamlit Python library. Keep
+        your answers technical and based on
+        facts – do not hallucinate features.""",
+    )
+    index = VectorStoreIndex.from_documents(docs)
+    return index
+
+
+index = load_data()
+
+if "chat_engine" not in st.session_state.keys():  # Initialize the chat engine
+    st.session_state.chat_engine = index.as_chat_engine(
+        chat_mode="condense_question", verbose=True, streaming=True
+    )
+
+if prompt := st.chat_input(
+    "Ask a question"
+):  # Prompt for user input and save to chat history
+    st.session_state.messages.append({"role": "user", "content": prompt})
+
+for message in st.session_state.messages:  # Write message history to UI
+    with st.chat_message(message["role"]):
+        st.write(message["content"])
+
+# If last message is not from assistant, generate a new response
+if st.session_state.messages[-1]["role"] != "assistant":
+    with st.chat_message("assistant"):
+        response_stream = st.session_state.chat_engine.stream_chat(prompt)
+        st.write_stream(response_stream.response_gen)
+        message = {"role": "assistant", "content": response_stream.response}
+        # Add response to message history
+        st.session_state.messages.append(message)
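
One caveat in the new version: `openai.api_key = st.secrets.openai_key` is a leftover from the OpenAI-based LlamaIndex tutorial this app is adapted from. `openai` is never imported, so that line raises a `NameError` at startup, and the Gemini LLM does not use it anyway; as the commented-out line 7 hints, Gemini reads `GOOGLE_API_KEY` from the environment. A minimal sketch of the intended wiring, assuming the key lives in `.streamlit/secrets.toml` under a hypothetical `google_api_key` entry (not part of the commit above):

```python
import os

import streamlit as st

# Assumption: .streamlit/secrets.toml contains a line like
#   google_api_key = "..."
# Export it before load_data() runs so the Gemini() constructor
# can pick up GOOGLE_API_KEY from the environment.
os.environ["GOOGLE_API_KEY"] = st.secrets["google_api_key"]
```

This replaces the `openai.api_key` line; no `api_key` argument needs to be passed to `Gemini(...)` if the environment variable is set first. Note also that `st.write_stream` requires Streamlit 1.31 or newer.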