Commit b860aca
Parent(s): 845bd8d
Clean up ResNetGenerator architecture - simplified structure

app/pytorch_colorizer.py CHANGED (+31 -30)
@@ -41,45 +41,46 @@ class ResNetBlock(nn.Module):
 class ResNetGenerator(nn.Module):
     """
     ResNet Generator for Image Colorization
-
+    Simplified architecture - the exact structure is hard to reverse-engineer from state_dict.
+    This is a standard ResNet-based generator that might work with non-strict loading.
     """
     def __init__(self, input_nc=1, output_nc=3, ngf=64, n_blocks=9):
         super(ResNetGenerator, self).__init__()
 
-
-
-
-
-
-
-
+        # Standard ResNet generator architecture
+        model_layers = []
+        # Initial conv
+        model_layers.append(nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3, bias=False))
+        model_layers.append(nn.BatchNorm2d(ngf))
+        model_layers.append(nn.ReLU(inplace=True))
         # Downsampling
-
-
-
-
-
-
-
+        model_layers.append(nn.Conv2d(ngf, ngf*2, kernel_size=3, stride=2, padding=1, bias=False))
+        model_layers.append(nn.BatchNorm2d(ngf*2))
+        model_layers.append(nn.ReLU(inplace=True))
+        model_layers.append(nn.Conv2d(ngf*2, ngf*4, kernel_size=3, stride=2, padding=1, bias=False))
+        model_layers.append(nn.BatchNorm2d(ngf*4))
+        model_layers.append(nn.ReLU(inplace=True))
+        model_layers.append(nn.Conv2d(ngf*4, ngf*8, kernel_size=3, stride=2, padding=1, bias=False))
+        model_layers.append(nn.BatchNorm2d(ngf*8))
+        model_layers.append(nn.ReLU(inplace=True))
         # ResNet blocks
-        mult = 2 ** n_downsampling
         for i in range(n_blocks):
-
-
+            model_layers.append(ResNetBlock(ngf*8))
         # Upsampling
-
-
-
-
-
-
-
-
-
-
+        model_layers.append(nn.ConvTranspose2d(ngf*8, ngf*4, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False))
+        model_layers.append(nn.BatchNorm2d(ngf*4))
+        model_layers.append(nn.ReLU(inplace=True))
+        model_layers.append(nn.ConvTranspose2d(ngf*4, ngf*2, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False))
+        model_layers.append(nn.BatchNorm2d(ngf*2))
+        model_layers.append(nn.ReLU(inplace=True))
+        model_layers.append(nn.ConvTranspose2d(ngf*2, ngf, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False))
+        model_layers.append(nn.BatchNorm2d(ngf))
+        model_layers.append(nn.ReLU(inplace=True))
+        model_layers.append(nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3, bias=False))
+        model_layers.append(nn.Tanh())
 
-        # Wrap in Sequential with 'layers'
-        self.layers = nn.Sequential(*
+        # Wrap in Sequential with 'layers' to match state_dict
+        self.layers = nn.Sequential(*model_layers)
 
     def forward(self, input):
         return self.layers(input)
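
For context on the docstring's non-strict loading remark: nn.Module.load_state_dict accepts strict=False and returns the key names that failed to match, which is a quick way to measure how far this reconstructed architecture diverges from the original checkpoint. A minimal sketch follows; the checkpoint filename colorizer.pth and the import path are hypothetical, and it assumes the saved state_dict uses the "layers.N.*" key scheme produced by self.layers above.

    # Sketch: load the pretrained weights non-strictly and report mismatches.
    # Assumptions: "colorizer.pth" is a hypothetical checkpoint path, and the
    # import path below is a guess based on the file touched by this commit.
    import torch

    from app.pytorch_colorizer import ResNetGenerator

    model = ResNetGenerator(input_nc=1, output_nc=3, ngf=64, n_blocks=9)
    state_dict = torch.load("colorizer.pth", map_location="cpu")

    # strict=False tolerates key names missing on either side and returns them;
    # note that a key present on both sides with a mismatched tensor shape
    # still raises a RuntimeError, so shape conflicts must be filtered first.
    result = model.load_state_dict(state_dict, strict=False)
    print("missing keys:", result.missing_keys)
    print("unexpected keys:", result.unexpected_keys)

An empty pair of lists would mean the guessed architecture matches the checkpoint's key names exactly; anything else pinpoints where the reconstruction diverges.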