Commit 0d80cde · 1 Parent(s): d04c869 · Update app.py

app.py CHANGED
@@ -1,6 +1,7 @@
 import streamlit as st
 import joblib,torch
 import time
+from PIL import Image
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 loaded_tokenizer = joblib.load("finalized_tokenizer.sav")
 loaded_model = joblib.load("finalized_model.sav")
@@ -9,6 +10,16 @@ st.title('Text Summarization using Pegasus')
 
 txt = st.text_area('Enter Text to summarize here', '')
 
+with st.sidebar:
+    st.subheader("Text Summarization using Pegasus")
+
+    st.write("PEGASUS uses an encoder-decoder model for sequence-to-sequence learning. In such a model, the encoder first takes into account the context of the whole input text and encodes it into a context vector, which is basically a numerical representation of the input text. This numerical representation is then fed to the decoder, whose job is to decode the context vector and produce the summary.")
+    image = Image.open("Pegasus_model.png")
+
+    st.image(image, caption='Pegasus Model')
+    st.code("App built by Srishti Pandey", language="python")
+
+
 if st.button('Summarize'):
     with st.spinner('Summarizing..'):
         batch = loaded_tokenizer(txt, truncation=True, padding='longest', return_tensors="pt").to(device)
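The diff view cuts the 'Summarize' handler off at the tokenization step. Below is a minimal sketch of how the rest of the handler presumably completes, assuming loaded_model is a Pegasus sequence-to-sequence model exposing the standard transformers generate() API (e.g. PegasusForConditionalGeneration); the st.write(summary) display call is an assumption, since the commit does not show how the output is rendered.

if st.button('Summarize'):
    with st.spinner('Summarizing..'):
        # Tokenize the input (as in the diff), then generate summary token
        # ids and decode them back into text.
        batch = loaded_tokenizer(txt, truncation=True, padding='longest',
                                 return_tensors="pt").to(device)
        summary_ids = loaded_model.generate(**batch)
        summary = loaded_tokenizer.batch_decode(summary_ids,
                                                skip_special_tokens=True)[0]
    st.write(summary)  # assumed display call; not shown in this commit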
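The app loads its tokenizer and model from joblib .sav artifacts instead of pulling a checkpoint at startup. The following is a sketch of how those artifacts were presumably produced; the checkpoint name "google/pegasus-xsum" is an assumption, as the commit does not identify which Pegasus checkpoint was serialized.

import joblib
from transformers import PegasusTokenizer, PegasusForConditionalGeneration

checkpoint = "google/pegasus-xsum"  # assumed checkpoint; not named in the commit
tokenizer = PegasusTokenizer.from_pretrained(checkpoint)
model = PegasusForConditionalGeneration.from_pretrained(checkpoint)

# Serialize once so the Space can load both without re-downloading.
joblib.dump(tokenizer, "finalized_tokenizer.sav")
joblib.dump(model, "finalized_model.sav")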