Commit 62db9e6
1 Parent(s): 97034d3

Optimize CPU performance: reduce inference steps and guidance scale for faster processing

Files changed: app/colorize_model.py (+11 -3)
app/colorize_model.py CHANGED

@@ -111,18 +111,23 @@ class ColorizeModel:
 
         return image
 
-    def colorize(self, image: Image.Image, num_inference_steps: int =
+    def colorize(self, image: Image.Image, num_inference_steps: int = None) -> Image.Image:
         """
         Colorize a grayscale image
 
         Args:
             image: PIL Image (grayscale or color)
-            num_inference_steps: Number of inference steps
+            num_inference_steps: Number of inference steps (auto-adjusted for CPU/GPU)
 
         Returns:
             Colorized PIL Image
         """
         try:
+            # Optimize inference steps based on device
+            if num_inference_steps is None:
+                # Use fewer steps on CPU for faster processing
+                num_inference_steps = 8 if self.device == "cpu" else 20
+
             # Preprocess image
             control_image = self.preprocess_image(image)
             original_size = image.size
@@ -131,6 +136,9 @@ class ColorizeModel:
             prompt = "colorize this black and white image, high quality, detailed, vibrant colors, natural colors"
             negative_prompt = "black and white, grayscale, monochrome, low quality, blurry, desaturated"
 
+            # Adjust guidance scale for CPU (lower = faster)
+            guidance_scale = 5.0 if self.device == "cpu" else 7.5
+
             # Generate colorized image based on model type
             if self.model_type == "controlnet":
                 # Use ControlNet pipeline
@@ -139,7 +147,7 @@ class ColorizeModel:
                     image=control_image,
                     negative_prompt=negative_prompt,
                     num_inference_steps=num_inference_steps,
-                    guidance_scale=
+                    guidance_scale=guidance_scale,
                     controlnet_conditioning_scale=1.0,
                     generator=torch.Generator(device=self.device).manual_seed(42)
                 )
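
For context, a minimal usage sketch of the changed method, assuming ColorizeModel can be constructed with its defaults and that the import path matches the file shown above; only colorize()'s new signature and its device-aware defaults are taken from this diff.

from PIL import Image

from app.colorize_model import ColorizeModel  # import path assumed from the file shown above

model = ColorizeModel()  # constructor arguments assumed; not shown in this diff
gray = Image.open("photo_bw.jpg").convert("L")

# Leaving num_inference_steps as None now triggers the device-aware defaults:
# 8 steps (and guidance_scale 5.0) on CPU, 20 steps (and guidance_scale 7.5) otherwise.
colorized = model.colorize(gray)
colorized.save("photo_color.jpg")

# An explicit step count still overrides the automatic choice.
colorized_hq = model.colorize(gray, num_inference_steps=30)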