Update app.py

app.py CHANGED
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 # encoding: utf-8
+import timm
 import spaces
 import gradio as gr
 from PIL import Image
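(Note: `timm` is not referenced anywhere else in app.py; presumably the checkpoint's `trust_remote_code` modeling files need it, so importing it up front makes a missing dependency fail at startup instead of mid-request. A minimal sketch of that fail-fast pattern, under that assumption:)

```python
# Fail fast on the vision-stack dependency (assumption: the MiniCPM-V
# remote code imports timm internally).
try:
    import timm  # noqa: F401
except ImportError as exc:
    raise SystemExit('Missing dependency: pip install timm') from exc
```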
@@ -10,37 +11,39 @@ import argparse
 from transformers import AutoModel, AutoTokenizer
 
 # README, How to run demo on different devices
+# For Nvidia GPUs support BF16 (like A100, H100, RTX3090)
+# python web_demo.py --device cuda --dtype bf16
 
-# For Nvidia GPUs
-# python
+# For Nvidia GPUs do NOT support BF16 (like V100, T4, RTX2080)
+# python web_demo.py --device cuda --dtype fp16
 
 # For Mac with MPS (Apple silicon or AMD GPUs).
-# PYTORCH_ENABLE_MPS_FALLBACK=1 python
+# PYTORCH_ENABLE_MPS_FALLBACK=1 python web_demo.py --device mps --dtype fp16
 
 # Argparser
 parser = argparse.ArgumentParser(description='demo')
 parser.add_argument('--device', type=str, default='cuda', help='cuda or mps')
+parser.add_argument('--dtype', type=str, default='bf16', help='bf16 or fp16')
 args = parser.parse_args()
 device = args.device
 assert device in ['cuda', 'mps']
+if args.dtype == 'bf16':
+    dtype = torch.bfloat16
+else:
+    dtype = torch.float16
 
 # Load model
-model_path = 'openbmb/MiniCPM-
-if device == 'mps':
-    print('Error: running int4 model with bitsandbytes on Mac is not supported right now.')
-    exit()
-model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
-else:
-    model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(dtype=torch.float16)
-model = model.to(device=device)
+model_path = 'openbmb/MiniCPM-V-2'
+model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(dtype=torch.bfloat16)
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+
+model = model.to(device=device, dtype=dtype)
 model.eval()
 
 
 
 ERROR_MSG = "Error, please retry"
-model_name = 'MiniCPM-
+model_name = 'MiniCPM-V 2.0'
 
 form_radio = {
     'choices': ['Beam Search', 'Sampling'],
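The hunk above replaces the old int4/bitsandbytes branch with a `--dtype` flag. Collapsed into a standalone sketch (assuming `torch` is imported elsewhere in the file, as the diff context suggests), the new load path is equivalent to:

```python
import argparse
import torch
from transformers import AutoModel, AutoTokenizer

parser = argparse.ArgumentParser(description='demo')
parser.add_argument('--device', type=str, default='cuda', help='cuda or mps')
parser.add_argument('--dtype', type=str, default='bf16', help='bf16 or fp16')
args = parser.parse_args()

# bf16 needs hardware support (A100/H100/RTX3090-class); fp16 is the
# fallback for older NVIDIA cards and Apple MPS.
dtype = torch.bfloat16 if args.dtype == 'bf16' else torch.float16

model_path = 'openbmb/MiniCPM-V-2'
model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to(device=args.device, dtype=dtype).eval()
```

Note that the committed version first casts to `torch.bfloat16` unconditionally and only later re-casts with the chosen `dtype`; the first cast is redundant (and briefly wrong for fp16), so a single `.to(device, dtype)` as above is the tidier equivalent.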
@@ -134,33 +137,30 @@ def create_component(params, comp='Slider'):
 
 @spaces.GPU(duration=120)
 def chat(img, msgs, ctx, params=None, vision_hidden_states=None):
-    default_params = {"
+    default_params = {"num_beams":3, "repetition_penalty": 1.2, "max_new_tokens": 1024}
     if params is None:
         params = default_params
     if img is None:
-        print(err)
-        traceback.print_exc()
-        yield ERROR_MSG
+        return -1, "Error, invalid image, please upload a new image", None, None
+    try:
+        image = img.convert('RGB')
+        answer, context, _ = model.chat(
+            image=image,
+            msgs=msgs,
+            context=None,
+            tokenizer=tokenizer,
+            **params
+        )
+        res = re.sub(r'(<box>.*</box>)', '', answer)
+        res = res.replace('<ref>', '')
+        res = res.replace('</ref>', '')
+        res = res.replace('<box>', '')
+        answer = res.replace('</box>', '')
+        return -1, answer, None, None
+    except Exception as err:
+        print(err)
+        traceback.print_exc()
+        return -1, ERROR_MSG, None, None
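The rewritten `chat()` returns the whole answer at once (the old version streamed with `yield`) and strips MiniCPM-V's grounding markup before display. A small illustration of that post-processing on a made-up answer string; note the committed pattern `<box>.*</box>` is greedy, so everything between the first `<box>` and the last `</box>` is dropped:

```python
import re

answer = 'A dog<ref>dog</ref><box>120 80 340 260</box> lying on a sofa.'

res = re.sub(r'(<box>.*</box>)', '', answer)  # drop coordinates (greedy match)
for tag in ('<ref>', '</ref>', '<box>', '</box>'):
    res = res.replace(tag, '')                # drop any stray unpaired tags

print(res)  # -> 'A dogdog lying on a sofa.'
```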
@@ -173,51 +173,46 @@ def upload_img(image, _chatbot, _app_session):
     return _chatbot, _app_session
 
 
-def respond(_chat_bot, _app_cfg, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature):
-    _question = _chat_bot[-1][0]
-    print('<Question>:', _question)
+def respond(_question, _chat_bot, _app_cfg, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature):
     if _app_cfg.get('ctx', None) is None:
-        _chat_bot
+        _chat_bot.append((_question, 'Please upload an image to start'))
+        return '', _chat_bot, _app_cfg
+
+    _context = _app_cfg['ctx'].copy()
+    if _context:
+        _context.append({"role": "user", "content": _question})
     else:
-        _context =
-        _context.append({"role": "user", "content": _question})
-    else:
-        _context = [{"role": "user", "content": _question}]
-    if params_form == 'Beam Search':
-        params = {
-            'sampling': False,
-            'stream': False,
-            'num_beams': num_beams,
-            'repetition_penalty': repetition_penalty,
-            "max_new_tokens": 896
-        }
-    else:
-        params = {
-            'sampling': True,
-            'stream': True,
-            'top_p': top_p,
-            'top_k': top_k,
-            'temperature': temperature,
-            'repetition_penalty': repetition_penalty_2,
-            "max_new_tokens": 896
-        }
-
-    gen = chat(_app_cfg['img'], _context, None, params)
-    _chat_bot[-1][1] = ""
-    for _char in gen:
-        _chat_bot[-1][1] += _char
-        _context[-1]["content"] += _char
-        yield (_chat_bot, _app_cfg)
-
-    _chat_bot.append((_question,
+        _context = [{"role": "user", "content": _question}]
+    print('<User>:', _question)
 
+    if params_form == 'Beam Search':
+        params = {
+            'sampling': False,
+            'num_beams': num_beams,
+            'repetition_penalty': repetition_penalty,
+            "max_new_tokens": 896
+        }
+    else:
+        params = {
+            'sampling': True,
+            'top_p': top_p,
+            'top_k': top_k,
+            'temperature': temperature,
+            'repetition_penalty': repetition_penalty_2,
+            "max_new_tokens": 896
+        }
+    code, _answer, _, sts = chat(_app_cfg['img'], _context, None, params)
+    print('<Assistant>:', _answer)
+
+    _context.append({"role": "assistant", "content": _answer})
+    _chat_bot.append((_question, _answer))
+    if code == 0:
+        _app_cfg['ctx']=_context
+        _app_cfg['sts']=sts
     return '', _chat_bot, _app_cfg
 
 
-def regenerate_button_clicked(_question, _chat_bot, _app_cfg):
+def regenerate_button_clicked(_question, _chat_bot, _app_cfg, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature):
     if len(_chat_bot) <= 1:
         _chat_bot.append(('Regenerate', 'No question for regeneration.'))
         return '', _chat_bot, _app_cfg
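`respond` now takes the question explicitly and owns the whole turn: it appends both the user and assistant messages to `_context` and only commits them to the session when `code == 0`. (As committed, `chat()` returns `-1` on every path, so that guard never fires and each turn effectively starts from the saved context unchanged.) A sketch of the history bookkeeping the regenerate path relies on, with hypothetical contents:

```python
# Context as respond() builds it: alternating user/assistant role dicts
# (hypothetical contents).
ctx = [
    {"role": "user", "content": "What is in the image?"},
    {"role": "assistant", "content": "A dog on a sofa."},
]

# regenerate_button_clicked rewinds one full turn before re-asking:
ctx = ctx[:-2]    # drop the last user/assistant pair
assert ctx == []  # back to the pre-question state
```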
@@ -227,18 +222,9 @@ def regenerate_button_clicked(_question, _chat_bot, _app_cfg):
     _question = _chat_bot[-1][0]
     _chat_bot = _chat_bot[:-1]
     _app_cfg['ctx'] = _app_cfg['ctx'][:-2]
-    return
-    # return respond(_chat_bot, _app_cfg, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature)
+    return respond(_question, _chat_bot, _app_cfg, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature)
 
 
-def clear_button_clicked(_question, _chat_bot, _app_cfg, _bt_pic):
-    _chat_bot.clear()
-    _app_cfg['sts'] = None
-    _app_cfg['ctx'] = None
-    _app_cfg['img'] = None
-    _bt_pic = None
-    return '', _chat_bot, _app_cfg, _bt_pic
-
 
 with gr.Blocks() as demo:
     with gr.Row():
@@ -253,43 +239,27 @@ with gr.Blocks() as demo:
             temperature = create_component(temperature_slider)
             repetition_penalty_2 = create_component(repetition_penalty_slider2)
             regenerate = create_component({'value': 'Regenerate'}, comp='Button')
-            clear = create_component({'value': 'Clear'}, comp='Button')
         with gr.Column(scale=3, min_width=500):
             app_session = gr.State({'sts':None,'ctx':None,'img':None})
             bt_pic = gr.Image(label="Upload an image to start")
             chat_bot = gr.Chatbot(label=f"Chat with {model_name}")
             txt_message = gr.Textbox(label="Input text")
 
-    clear.click(
-        clear_button_clicked,
-        [txt_message, chat_bot, app_session, bt_pic],
-        [txt_message, chat_bot, app_session, bt_pic],
-        queue=False
-    )
-    txt_message.submit(
-        request,
-        #[txt_message, chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
-        [txt_message, chat_bot, app_session],
-        [txt_message, chat_bot, app_session],
-        queue=False
-    ).then(
-        respond,
-        [chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
-        [chat_bot, app_session]
-    )
     regenerate.click(
         regenerate_button_clicked,
-        [txt_message, chat_bot, app_session],
-        [txt_message, chat_bot, app_session]
-        respond,
-        [chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
-        [chat_bot, app_session]
+        [txt_message, chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
+        [txt_message, chat_bot, app_session]
+    )
+    txt_message.submit(
+        respond,
+        [txt_message, chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
+        [txt_message, chat_bot, app_session]
     )
     bt_pic.upload(lambda: None, None, chat_bot, queue=False).then(upload_img, inputs=[bt_pic,chat_bot,app_session], outputs=[chat_bot,app_session])
 
 # launch
 #demo.launch(share=False, debug=True, show_api=False, server_port=8080, server_name="0.0.0.0")
-demo.
+demo.launch()
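With the Clear button and the old `request` → `.then(respond, …)` chain removed, each control is now wired straight to a single handler whose inputs and outputs are plain component lists. A stripped-down sketch of the same wiring pattern (placeholder `echo` handler standing in for `respond`):

```python
import gradio as gr

def echo(question, history, state):
    # Stand-in for respond(): consume the textbox, append one chat turn.
    history = history + [(question, f'echo: {question}')]
    return '', history, state

with gr.Blocks() as demo:
    state = gr.State({'sts': None, 'ctx': None, 'img': None})
    chatbot = gr.Chatbot()
    textbox = gr.Textbox()
    # Same shape as the app: inputs list -> handler -> outputs list.
    textbox.submit(echo, [textbox, chatbot, state], [textbox, chatbot, state])

demo.launch()
```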