import gradio as gr
import cv2
import numpy as np
from PIL import Image
import base64
from io import BytesIO
from models.image_text_transformation import ImageTextTransformation
import argparse
import torch
parser = argparse.ArgumentParser()
parser.add_argument('--gpt_version', choices=['gpt-3.5-turbo', 'gpt4'], default='gpt-3.5-turbo')
parser.add_argument('--image_caption', action='store_true', dest='image_caption', default=True, help='Enable BLIP/BLIP2 image captioning (enabled by default)')
parser.add_argument('--dense_caption', action='store_true', dest='dense_caption', default=True, help='Enable dense captioning (enabled by default)')
parser.add_argument('--semantic_segment', action='store_true', dest='semantic_segment', default=True, help='Enable semantic segmentation (enabled by default)')
parser.add_argument('--sam_arch', choices=['vit_b', 'vit_l', 'vit_h'], dest='sam_arch', default='vit_b', help='vit_b is the default model (fast but less accurate); vit_l and vit_h are larger models')
parser.add_argument('--captioner_base_model', choices=['blip', 'blip2'], dest='captioner_base_model', default='blip', help='blip2 requires 15G of GPU memory, blip requires 6G')
parser.add_argument('--region_classify_model', choices=['ssa', 'edit_anything'], dest='region_classify_model', default='edit_anything', help='Region classification model: edit_anything is about ten times faster than ssa, but less accurate')
parser.add_argument('--image_caption_device', choices=['cuda', 'cpu'], default='cuda', help='Device for image captioning; more than 14G of GPU memory is recommended for cuda')
parser.add_argument('--dense_caption_device', choices=['cuda', 'cpu'], default='cuda', help='Device for dense captioning; GPUs with less than 6G of memory are not recommended')
parser.add_argument('--semantic_segment_device', choices=['cuda', 'cpu'], default='cuda', help='Device for semantic segmentation; more than 14G of GPU memory is recommended. Make sure this model and the image_caption model are on the same device')
parser.add_argument('--contolnet_device', choices=['cuda', 'cpu'], default='cpu', help='Device for ControlNet; GPUs with less than 6G of memory are not recommended')
args = parser.parse_args()
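# Note: the flags above use action='store_true' with default=True, so those
# stages are always enabled unless the defaults are edited. The block below
# then overrides every per-module device flag based on CUDA availability.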
device = "cuda" if torch.cuda.is_available() else "cpu"
# device = "cpu"
if device == "cuda":
    args.image_caption_device = "cuda"
    args.dense_caption_device = "cuda"
    args.semantic_segment_device = "cuda"
    args.contolnet_device = "cuda"
else:
    args.image_caption_device = "cpu"
    args.dense_caption_device = "cpu"
    args.semantic_segment_device = "cpu"
    args.contolnet_device = "cpu"
def pil_image_to_base64(image):
    buffered = BytesIO()
    # Convert to RGB first so RGBA/paletted images do not break JPEG encoding
    image.convert("RGB").save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    return img_str
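# Example usage -- embed a PIL image directly in an HTML tag without touching disk:
#   img = Image.open("examples/test_4.jpg")
#   tag = f'<img src="data:image/jpeg;base64,{pil_image_to_base64(img)}">'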
def add_logo():
    with open("examples/logo.png", "rb") as f:
        logo_base64 = base64.b64encode(f.read()).decode()
    return logo_base64
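# process_image runs the image->text pipeline (image caption, dense caption,
# region semantics, GPT paragraph) and, when "Image Generation" is selected,
# feeds the generated paragraph back through the text->image (ControlNet) stage.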
def process_image(image_src, options=None, processor=None):
    print(options)
    if options is None:
        options = []
    processor.args.semantic_segment = "Semantic Segment" in options
    image_generation_status = "Image Generation" in options
    image_caption, dense_caption, region_semantic, gen_text = processor.image_to_text(image_src)
    if image_generation_status:
        gen_image = processor.text_to_image(gen_text)
        gen_image_str = pil_image_to_base64(gen_image)
    # Combine the outputs into a single HTML output
    custom_output = f'''
    <h2>Image->Text:</h2>
    <div style="display: flex; flex-wrap: wrap;">
        <div style="flex: 1;">
            <h3>Image Caption</h3>
            <p>{image_caption}</p>
        </div>
        <div style="flex: 1;">
            <h3>Dense Caption</h3>
            <p>{dense_caption}</p>
        </div>
        <div style="flex: 1;">
            <h3>Region Semantic</h3>
            <p>{region_semantic}</p>
        </div>
    </div>
    <div style="display: flex; flex-wrap: wrap;">
        <div style="flex: 1;">
            <h3>GPT4 Reasoning:</h3>
            <p>{gen_text}</p>
        </div>
    </div>
    '''
    if image_generation_status:
        custom_output += f'''
        <h2>Text->Image:</h2>
        <div style="display: flex; flex-wrap: wrap;">
            <div style="flex: 1;">
                <h3>Generated Image</h3>
                <img src="data:image/jpeg;base64,{gen_image_str}" width="400" style="vertical-align: middle;">
            </div>
        </div>
        '''
    return custom_output
processor = ImageTextTransformation(args)

# Create the Gradio input component (the two option checkboxes are provided
# via the CheckboxGroup inside the interface below)
image_input = gr.Image(type='filepath', label="Input Image")
extra_title = '[Duplicate this Space](https://huggingface.co/spaces/Awiny/Image2Paragraph?duplicate=true)' + '\n\n'
logo_base64 = add_logo()
# Create the title with the logo
title_with_logo = \
    f'<img src="data:image/png;base64,{logo_base64}" width="400" style="vertical-align: middle;"> Understanding Image with Text'

examples = [
    ["examples/test_4.jpg"],
]
# Create the Gradio interface
interface = gr.Interface(
    fn=lambda image, options: process_image(image, options, processor),
    inputs=[
        image_input,
        gr.CheckboxGroup(
            label="Options",
            choices=["Image Generation", "Semantic Segment"],
        ),
    ],
    outputs=gr.HTML(),
    title=title_with_logo,
    examples=examples,
    description=extra_title + """
    Image.txt: this demo transforms an image into text. The generated paragraph can then be used for retrieval, question answering, and other zero-shot tasks.
    \n Github: https://github.com/showlab/Image2Paragraph
    \n Twitter: https://twitter.com/awinyimgprocess/status/1646225454599372800?s=46&t=HvOe9T2n35iFuCHP5aIHpQ
    \n For the online demo we use the smallest models to speed things up; for better results, see the GitHub repo for how to use the larger models.
    \n The text-to-image model is ControlNet, which uses a Canny edge map as reference.
    \n To speed things up, images are generated at a small size (384); run the code locally for high-quality samples.
    """
)
# Launch the interface
interface.launch()
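# A minimal local-usage sketch (assumes this file is saved as app.py and the
# Image2Paragraph dependencies and checkpoints are installed per the repo README):
#   python app.py
# Gradio then serves the demo at http://127.0.0.1:7860 by default.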