Commit a315115
Parent(s): f40725d

Improve fallback colorization with better color hints and saturation

app/main_fastai.py  CHANGED  (+28 -10)
@@ -215,8 +215,9 @@ async def health_check():
 
 def simple_colorize_fallback(image: Image.Image) -> Image.Image:
     """
-
+    Enhanced fallback colorization using LAB color space with better color hints
     This provides basic colorization when the model doesn't load
+    Note: This is a simple heuristic-based approach and won't match trained models
     """
     # Convert to LAB color space
     if image.mode != "RGB":
@@ -224,26 +225,43 @@ def simple_colorize_fallback(image: Image.Image) -> Image.Image:
 
     # Convert to numpy array
     img_array = np.array(image)
+    original_shape = img_array.shape
 
     # Convert RGB to LAB
     lab = cv2.cvtColor(img_array, cv2.COLOR_RGB2LAB)
 
-    #
+    # Split channels
     l, a, b = cv2.split(lab)
 
-    # Enhance lightness
-    clahe = cv2.createCLAHE(clipLimit=
-
+    # Enhance lightness with CLAHE (Contrast Limited Adaptive Histogram Equalization)
+    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
+    l_enhanced = clahe.apply(l)
 
-    # Add
-    #
-
-
+    # Add intelligent color hints based on image characteristics
+    # Analyze the grayscale image to determine color hints
+    l_normalized = l.astype(np.float32) / 255.0
+
+    # Create color hints: warmer tones for mid-brightness areas
+    # a channel: green-red axis (positive = red, negative = green)
+    # b channel: blue-yellow axis (positive = yellow, negative = blue)
+
+    # Add warm tones (slight red and yellow bias) based on brightness
+    # Darker areas get cooler tones, mid-brightness gets warmer
+    brightness_mask = np.clip((l_normalized - 0.3) * 2, 0, 1)  # Emphasize mid-brightness
+
+    # Add color hints: warm tones for skin/faces, cooler for shadows
+    a_hint = np.clip(a.astype(np.float32) + brightness_mask * 8 + (1 - brightness_mask) * 2, 0, 255).astype(np.uint8)
+    b_hint = np.clip(b.astype(np.float32) + brightness_mask * 12 + (1 - brightness_mask) * 3, 0, 255).astype(np.uint8)
 
     # Merge channels and convert back to RGB
-    lab_colored = cv2.merge([
+    lab_colored = cv2.merge([l_enhanced, a_hint, b_hint])
     colored_rgb = cv2.cvtColor(lab_colored, cv2.COLOR_LAB2RGB)
 
+    # Apply slight saturation boost
+    hsv = cv2.cvtColor(colored_rgb, cv2.COLOR_RGB2HSV)
+    hsv[:, :, 1] = np.clip(hsv[:, :, 1].astype(np.float32) * 1.2, 0, 255).astype(np.uint8)
+    colored_rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
+
     return Image.fromarray(colored_rgb)
 
 
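For quick manual testing of the fallback path, a minimal sketch is shown below. It is not part of this commit: it assumes numpy, opencv-python, and Pillow are installed, that `simple_colorize_fallback` can be imported from `app.main_fastai` with the repository root on PYTHONPATH, and that `old_photo.jpg` is a placeholder path for any photo on disk.

# Hypothetical usage sketch (not part of this commit): call the fallback
# colorizer directly instead of going through the FastAPI endpoints.
from PIL import Image

from app.main_fastai import simple_colorize_fallback  # function changed in this commit

# "old_photo.jpg" is a placeholder path. Converting through "L" simulates a
# black-and-white input; converting back to "RGB" yields the 3-channel uint8
# array that cv2.COLOR_RGB2LAB expects inside the function.
gray = Image.open("old_photo.jpg").convert("L").convert("RGB")

colorized = simple_colorize_fallback(gray)
colorized.save("old_photo_colorized.jpg")
print("saved", colorized.size, colorized.mode)

Because the a/b hints depend only on the brightness mask derived from the L channel, the result is a warm-toned tint rather than semantic colorization, which is the caveat the new docstring note calls out.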