Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
+import spaces
 import gradio as gr
 import torch
 from PIL import Image
@@ -6,7 +7,6 @@ from longcat_image.models import LongCatImageTransformer2DModel
 from longcat_image.pipelines import LongCatImageEditPipeline
 import numpy as np
 import os
-import spaces
 
 # Global variables for model
 pipe = None
@@ -21,26 +21,27 @@ def initialize_model():
 
     try:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
+        model_id = 'meituan-longcat/LongCat-Image-Edit'
 
-
-        if not os.path.exists(checkpoint_dir):
-            raise ValueError(f"Model not found at {checkpoint_dir}. Please download the model first.")
+        print(f"🔄 Loading model from {model_id}...")
 
+        # Load text processor
         text_processor = AutoProcessor.from_pretrained(
-
+            model_id,
             subfolder='tokenizer'
         )
 
+        # Load transformer
         transformer = LongCatImageTransformer2DModel.from_pretrained(
-
+            model_id,
             subfolder='transformer',
             torch_dtype=torch.bfloat16,
             use_safetensors=True
         ).to(device)
 
+        # Load pipeline
         pipe = LongCatImageEditPipeline.from_pretrained(
-
+            model_id,
             transformer=transformer,
             text_processor=text_processor,
         )
@@ -127,7 +128,7 @@ with gr.Blocks(fill_height=True) as demo:
 Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #4A90E2; text-decoration: none;">anycoder</a>
 </p>
 <p style="font-size: 12px; color: #888; margin-top: 5px;">
-⚡ Powered by Zero-GPU
+⚡ Powered by Zero-GPU | 🤗 Model: <a href="https://huggingface.co/meituan-longcat/LongCat-Image-Edit" target="_blank" style="color: #4A90E2;">meituan-longcat/LongCat-Image-Edit</a>
 </p>
 </div>
 """)
@@ -188,6 +189,7 @@ with gr.Blocks(fill_height=True) as demo:
 <div style="padding: 10px; background-color: #f0f7ff; border-radius: 8px; margin-top: 10px;">
 <p style="margin: 0; font-size: 12px; color: #555;">
 ⏱️ <strong>Note:</strong> Zero-GPU provides 120 seconds of GPU time per request.
+First run may take longer as the model loads from Hugging Face Hub.
 Processing typically takes 30-60 seconds depending on settings.
 </p>
 </div>
@@ -210,6 +212,7 @@ with gr.Blocks(fill_height=True) as demo:
 - Try different guidance scales for varied results
 - Higher inference steps = better quality (but slower)
 - GPU time is limited - optimize your settings for speed
+- Model loads automatically from Hugging Face Hub
 """)
 
 # Examples section
@@ -239,7 +242,7 @@ with gr.Blocks(fill_height=True) as demo:
 gr.HTML("""
 <div style="text-align: center; margin-top: 40px; padding: 20px; border-top: 1px solid #eee;">
 <p style="color: #666; font-size: 14px;">
-Powered by LongCat Image Edit with Zero-GPU |
+Powered by <a href="https://huggingface.co/meituan-longcat/LongCat-Image-Edit" target="_blank" style="color: #4A90E2;">LongCat Image Edit</a> with Zero-GPU |
 <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #4A90E2;">Built with anycoder</a>
 </p>
 </div>
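
For context on the two changes above: import spaces now comes first in app.py (Zero-GPU expects it to be imported before torch or other CUDA-related packages), and every from_pretrained call now takes the Hub id meituan-longcat/LongCat-Image-Edit instead of a local checkpoint directory. The sketch below shows how these pieces typically fit together in a Zero-GPU Space. It is a minimal illustration, not the Space's actual code: the AutoProcessor import, the @spaces.GPU(duration=120) decorator, the handler name edit_image, and the pipeline call arguments are assumptions that fall outside this diff; only the model id, the loader calls, and the import order are taken from the change itself.

    # Minimal sketch of a Zero-GPU app using the Hub-hosted model; names marked below are assumed.
    import spaces            # Zero-GPU: import before torch or any CUDA-related package
    import torch
    from PIL import Image
    from transformers import AutoProcessor                      # assumed source of AutoProcessor
    from longcat_image.models import LongCatImageTransformer2DModel
    from longcat_image.pipelines import LongCatImageEditPipeline

    model_id = 'meituan-longcat/LongCat-Image-Edit'              # Hub id introduced by this commit

    # Load everything once at startup, as initialize_model() does in the diff above
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    text_processor = AutoProcessor.from_pretrained(model_id, subfolder='tokenizer')
    transformer = LongCatImageTransformer2DModel.from_pretrained(
        model_id, subfolder='transformer', torch_dtype=torch.bfloat16, use_safetensors=True
    ).to(device)
    pipe = LongCatImageEditPipeline.from_pretrained(
        model_id, transformer=transformer, text_processor=text_processor
    )

    @spaces.GPU(duration=120)   # assumed; matches the "120 seconds of GPU time" note in the UI
    def edit_image(image: Image.Image, prompt: str, steps: int = 30, guidance: float = 4.5):
        # Hypothetical handler; the Space's real function name and pipeline arguments may differ
        result = pipe(image=image, prompt=prompt,
                      num_inference_steps=steps, guidance_scale=guidance)
        return result.images[0]

On Zero-GPU, CUDA is only available inside functions decorated with @spaces.GPU, so the pattern is to do all loading at startup and wrap just the editing call, keeping the per-request GPU window short. That is presumably why this commit moves import spaces ahead of the torch import rather than leaving it among the later imports.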