Update app.py

app.py CHANGED
@@ -105,26 +105,35 @@ pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight",
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 MAX_SEED = np.iinfo(np.int32).max
 
-# --- Helper Function for Aspect Ratio ---
+# --- Helper Function for Aspect Ratio (Corrected) ---
+@torch.no_grad()
 def update_dimensions_on_upload(image):
+    # *** FIX: This function now correctly preserves aspect ratio for all image sizes. ***
     if image is None:
-        return 1024, 1024
+        return 1024, 1024  # Default for no image
 
     original_width, original_height = image.size
-    …
-    if original_width > original_height:
-        …
+    max_dim = 1024
+
+    if original_width > max_dim or original_height > max_dim:
+        # If the image is larger than the max dimension, scale it down
+        if original_width > original_height:
+            new_width = max_dim
+            new_height = int(max_dim * original_height / original_width)
+        else:
+            new_height = max_dim
+            new_width = int(max_dim * original_width / original_height)
     else:
-        new_width =
-        …
+        # If the image is smaller, use its original dimensions
+        new_width = original_width
+        new_height = original_height
+
+    # Ensure final dimensions are multiples of 8 for model compatibility
+    final_width = (new_width // 8) * 8
+    final_height = (new_height // 8) * 8
+
+    return final_width, final_height
 
 # --- Main Inference Function ---
 @spaces.GPU
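The corrected helper is self-contained, so its sizing behavior can be checked without loading the pipeline. A minimal sketch, assuming only Pillow is installed; the @torch.no_grad() decorator is dropped here so the snippet runs without torch:

# Standalone sketch of the corrected sizing logic (assumes only Pillow).
from PIL import Image

def update_dimensions_on_upload(image):
    if image is None:
        return 1024, 1024  # Default for no image

    original_width, original_height = image.size
    max_dim = 1024

    if original_width > max_dim or original_height > max_dim:
        # Downscale the long side to max_dim, keeping the aspect ratio
        if original_width > original_height:
            new_width = max_dim
            new_height = int(max_dim * original_height / original_width)
        else:
            new_height = max_dim
            new_width = int(max_dim * original_width / original_height)
    else:
        # Smaller images keep their original dimensions
        new_width, new_height = original_width, original_height

    # Snap both sides down to multiples of 8 for model compatibility
    return (new_width // 8) * 8, (new_height // 8) * 8

# A 4000x2000 upload maps to (1024, 512); a 500x300 one to (496, 296).
print(update_dimensions_on_upload(Image.new("RGB", (4000, 2000))))  # (1024, 512)
print(update_dimensions_on_upload(Image.new("RGB", (500, 300))))    # (496, 296)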
@@ -177,17 +186,12 @@ def infer(
 # --- Wrapper for Examples ---
 @spaces.GPU
 def infer_example(input_image, prompt, lora_adapter):
-    # *** FIX: The input is already a PIL Image object from Gradio, not a path. ***
-    # We no longer need Image.open() here.
     input_pil = input_image.convert("RGB")
-
-    # Calculate aspect ratio for the example image
+    # Calculate correct aspect ratio for the example image using the corrected function
     width, height = update_dimensions_on_upload(input_pil)
-
-    # Set reasonable default values for example inference to get good results
+    # Set reasonable default values for example inference
    guidance_scale = 1.0
     steps = 4
-
     # Call the main infer function
     result, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps, width, height)
     return result, seed
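The removed comments note that Gradio hands infer_example a PIL image rather than a file path, which happens when the image component is declared with type="pil". A hypothetical sketch of how such a wrapper is typically wired to gr.Examples; the component names, labels, and example row are illustrative assumptions, not taken from this diff:

# Hypothetical wiring for the examples wrapper; names below are assumptions.
import gradio as gr

with gr.Blocks() as demo:
    input_image = gr.Image(type="pil")  # type="pil" is why infer_example gets a PIL Image, not a path
    prompt = gr.Textbox(label="Prompt")
    lora_adapter = gr.Dropdown(["Relight"], label="LoRA Adapter")
    output_image = gr.Image(label="Result")
    used_seed = gr.Number(label="Seed")

    gr.Examples(
        examples=[["examples/room.png", "Relight the scene at sunset", "Relight"]],
        inputs=[input_image, prompt, lora_adapter],
        outputs=[output_image, used_seed],
        fn=infer_example,  # the wrapper from app.py above
        cache_examples=True,  # cached runs call infer_example with PIL images
    )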