
Commit a3561ce
Author: minerva-ml
Message: solution-1
Parent: 8ac4f9b
28 files changed: +2500 -2133 lines
File renamed without changes.

common_blocks/architectures/__init__.py

Whitespace-only changes.
Lines changed: 197 additions & 0 deletions
@@ -0,0 +1,197 @@
import numpy as np
from torch import nn
from torch.nn import functional as F
import torch


class Conv2dBnRelu(nn.Module):
    """Conv2d with optional batch norm, ReLU and shape-preserving padding."""
    PADDING_METHODS = {'replication': nn.ReplicationPad2d,
                       'reflection': nn.ReflectionPad2d,
                       'zero': nn.ZeroPad2d,
                       }

    def __init__(self, in_channels, out_channels, kernel_size=(3, 3),
                 use_relu=True, use_batch_norm=True, use_padding=True, padding_method='replication'):
        super().__init__()
        self.use_relu = use_relu
        self.use_batch_norm = use_batch_norm
        self.use_padding = use_padding
        # Note: PyTorch kernel_size is (height, width); the _w/_h names here are
        # swapped relative to that convention, but they are used consistently.
        self.kernel_w = kernel_size[0]
        self.kernel_h = kernel_size[1]
        self.padding_w = kernel_size[0] - 1
        self.padding_h = kernel_size[1] - 1

        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        # Pad (left, right, top, bottom) so the unpadded convolution below
        # returns a map with the same height and width as its input.
        self.padding = Conv2dBnRelu.PADDING_METHODS[padding_method](padding=(0, self.padding_h, self.padding_w, 0))
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=0)

    def forward(self, x):
        if self.use_padding:
            x = self.padding(x)
        x = self.conv(x)
        if self.use_batch_norm:
            x = self.batch_norm(x)
        if self.use_relu:
            x = self.relu(x)
        return x


class DeconvConv2dBnRelu(nn.Module):
    """Transposed convolution that doubles H and W, with optional batch norm and ReLU."""
    def __init__(self, in_channels, out_channels, use_relu=True, use_batch_norm=True):
        super().__init__()
        self.use_relu = use_relu
        self.use_batch_norm = use_batch_norm

        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3,
                                         stride=2, padding=1, output_padding=1)

    def forward(self, x):
        x = self.deconv(x)
        if self.use_batch_norm:
            x = self.batch_norm(x)
        if self.use_relu:
            x = self.relu(x)
        return x


class NoOperation(nn.Module):
    """Identity module, used where an optional block is disabled."""
    def forward(self, x):
        return x


class DecoderBlock(nn.Module):
    """Upsample x2, concatenate the encoder skip connection, convolve twice,
    then recalibrate with channel and spatial squeeze-and-excitation (scSE)."""
    def __init__(self, in_channels, middle_channels, out_channels):
        super(DecoderBlock, self).__init__()
        self.conv1 = Conv2dBnRelu(in_channels, middle_channels)
        self.conv2 = Conv2dBnRelu(middle_channels, out_channels)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
        self.relu = nn.ReLU(inplace=True)
        self.channel_se = ChannelSELayer(out_channels, reduction=16)
        self.spatial_se = SpatialSELayer(out_channels)

    def forward(self, x, e=None):
        x = self.upsample(x)
        if e is not None:
            x = torch.cat([x, e], 1)
        x = self.conv1(x)
        x = self.conv2(x)

        channel_se = self.channel_se(x)
        spatial_se = self.spatial_se(x)

        x = self.relu(channel_se + spatial_se)
        return x


class ChannelSELayer(nn.Module):
    """Channel squeeze-and-excitation: global average pooling followed by a
    bottleneck MLP that emits one sigmoid gate per channel."""
    def __init__(self, channel, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y


class SpatialSELayer(nn.Module):
    """Spatial squeeze-and-excitation: a 1x1 convolution emits one sigmoid
    gate per spatial position."""
    def __init__(self, channels):
        super().__init__()
        self.fc = nn.Conv2d(channels, 1, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.fc(x)
        x = self.sigmoid(x)
        return module_input * x

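These two gates are exactly what DecoderBlock sums before its final ReLU. A minimal smoke test of that combination; the batch size, channel count and map size are illustrative assumptions, not values from the commit:

    import torch

    x = torch.randn(2, 64, 32, 32)                 # assumed: batch 2, 64 channels, 32x32 map
    channel_se = ChannelSELayer(64, reduction=16)  # one sigmoid gate per channel
    spatial_se = SpatialSELayer(64)                # one sigmoid gate per pixel
    y = torch.relu(channel_se(x) + spatial_se(x))  # the same fusion DecoderBlock applies
    assert y.shape == x.shape                      # recalibration never changes the shape
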
class DepthChannelExcitation(nn.Module):
    """Scales each channel by a gate computed from an auxiliary depth scalar."""
    def __init__(self, channels):
        super().__init__()

        self.fc = nn.Sequential(nn.Linear(1, channels),
                                nn.Sigmoid()
                                )

    def forward(self, x, d=None):
        # d is expected to be a (batch, 1) depth tensor; despite the default,
        # the layer cannot actually run with d=None.
        b, c, _, _ = x.size()
        y = self.fc(d).view(b, c, 1, 1)
        return x * y


class DepthSpatialExcitation(nn.Module):
    """Expands an auxiliary depth scalar into a coarse spatial gate grid,
    then upsamples it to the feature map resolution."""
    def __init__(self, grid_size=16):
        super().__init__()
        self.grid_size = grid_size
        self.grid_size_sqrt = int(np.sqrt(grid_size))

        self.fc = nn.Sequential(nn.Linear(1, grid_size),
                                nn.Sigmoid()
                                )

    def forward(self, x, d=None):
        # Assumes square feature maps whose side is divisible by grid_size_sqrt.
        b, _, h, w = x.size()
        y = self.fc(d).view(b, 1, self.grid_size_sqrt, self.grid_size_sqrt)
        scale_factor = h // self.grid_size_sqrt
        y = F.interpolate(y, scale_factor=scale_factor, mode='bilinear')
        return x * y


class GlobalConvolutionalNetwork(nn.Module):
    """Large-kernel context block: two parallel branches of separable
    (k, 1) / (1, k) convolutions whose outputs are summed."""
    def __init__(self, in_channels, out_channels, kernel_size, use_relu=False):
        super().__init__()

        self.conv1 = nn.Sequential(Conv2dBnRelu(in_channels=in_channels,
                                                out_channels=out_channels,
                                                kernel_size=(kernel_size, 1),
                                                use_relu=use_relu, use_padding=True),
                                   Conv2dBnRelu(in_channels=out_channels,
                                                out_channels=out_channels,
                                                kernel_size=(1, kernel_size),
                                                use_relu=use_relu, use_padding=True),
                                   )
        self.conv2 = nn.Sequential(Conv2dBnRelu(in_channels=in_channels,
                                                out_channels=out_channels,
                                                kernel_size=(1, kernel_size),
                                                use_relu=use_relu, use_padding=True),
                                   Conv2dBnRelu(in_channels=out_channels,
                                                out_channels=out_channels,
                                                kernel_size=(kernel_size, 1),
                                                use_relu=use_relu, use_padding=True),
                                   )

    def forward(self, x):
        conv1 = self.conv1(x)
        conv2 = self.conv2(x)
        return conv1 + conv2


class BoundaryRefinement(nn.Module):
    """Residual refinement block; the skip addition in forward requires
    in_channels == out_channels."""
    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()

        self.conv = nn.Sequential(Conv2dBnRelu(in_channels=in_channels,
                                               out_channels=out_channels,
                                               kernel_size=(kernel_size, kernel_size),
                                               use_relu=True, use_padding=True),
                                  # The second convolution consumes the first one's
                                  # output, so its input width is out_channels.
                                  Conv2dBnRelu(in_channels=out_channels,
                                               out_channels=out_channels,
                                               kernel_size=(kernel_size, kernel_size),
                                               use_relu=False, use_padding=True),
                                  )

    def forward(self, x):
        conv = self.conv(x)
        return x + conv
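
To make the shape contracts of these blocks concrete, here is a minimal sketch wiring one decoder step into the context blocks; every channel count and map size below is an illustrative assumption, not something the commit fixes:

    import torch

    # One decoder step: upsample a 128-channel map x2 and fuse a 64-channel skip.
    decoder = DecoderBlock(in_channels=128 + 64, middle_channels=64, out_channels=64)
    bottom = torch.randn(2, 128, 16, 16)  # coarse feature map from the stage below
    skip = torch.randn(2, 64, 32, 32)     # encoder features at the target resolution
    out = decoder(bottom, skip)
    assert out.shape == (2, 64, 32, 32)   # channels follow out_channels

    # Large-kernel context followed by residual boundary refinement
    # (BoundaryRefinement's skip addition needs in_channels == out_channels).
    gcn = GlobalConvolutionalNetwork(in_channels=64, out_channels=64, kernel_size=7)
    refine = BoundaryRefinement(in_channels=64, out_channels=64, kernel_size=3)
    assert refine(gcn(out)).shape == out.shape  # both blocks preserve the map size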
Lines changed: 164 additions & 0 deletions
@@ -0,0 +1,164 @@
from torch import nn
import torchvision
import pretrainedmodels


class ResNetEncoders(nn.Module):
    """Wraps a torchvision ResNet and exposes its four stages as encoder features."""
    def __init__(self, encoder_depth, pretrained=False, pool0=False):
        super().__init__()

        if encoder_depth == 18:
            self.encoder = torchvision.models.resnet18(pretrained=pretrained)
        elif encoder_depth == 34:
            self.encoder = torchvision.models.resnet34(pretrained=pretrained)
        elif encoder_depth == 50:
            self.encoder = torchvision.models.resnet50(pretrained=pretrained)
        elif encoder_depth == 101:
            self.encoder = torchvision.models.resnet101(pretrained=pretrained)
        elif encoder_depth == 152:
            self.encoder = torchvision.models.resnet152(pretrained=pretrained)
        else:
            raise NotImplementedError('only the 18, 34, 50, 101 and 152 versions of ResNet are implemented')

        # pool0 keeps the stem max-pool, halving the resolution of every stage below.
        if pool0:
            self.conv1 = nn.Sequential(self.encoder.conv1,
                                       self.encoder.bn1,
                                       self.encoder.relu,
                                       self.encoder.maxpool)
        else:
            self.conv1 = nn.Sequential(self.encoder.conv1,
                                       self.encoder.bn1,
                                       self.encoder.relu)

        self.encoder2 = self.encoder.layer1
        self.encoder3 = self.encoder.layer2
        self.encoder4 = self.encoder.layer3
        self.encoder5 = self.encoder.layer4

    def forward(self, x):
        conv1 = self.conv1(x)
        encoder2 = self.encoder2(conv1)
        encoder3 = self.encoder3(encoder2)
        encoder4 = self.encoder4(encoder3)
        encoder5 = self.encoder5(encoder4)

        return encoder2, encoder3, encoder4, encoder5


class SeResNetEncoders(nn.Module):
    """Wraps an SE-ResNet from pretrainedmodels and exposes its four stages."""
    def __init__(self, encoder_depth, pretrained='imagenet', pool0=False):
        super().__init__()

        if encoder_depth == 50:
            self.encoder = pretrainedmodels.__dict__['se_resnet50'](num_classes=1000, pretrained=pretrained)
        elif encoder_depth == 101:
            self.encoder = pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained=pretrained)
        elif encoder_depth == 152:
            self.encoder = pretrainedmodels.__dict__['se_resnet152'](num_classes=1000, pretrained=pretrained)
        else:
            raise NotImplementedError('only the 50, 101 and 152 versions of SE-ResNet are implemented')

        if pool0:
            self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                       self.encoder.layer0.bn1,
                                       self.encoder.layer0.relu1,
                                       # the stem max-pool is named 'pool' in pretrainedmodels' SENet
                                       self.encoder.layer0.pool)
        else:
            self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                       self.encoder.layer0.bn1,
                                       self.encoder.layer0.relu1)

        self.encoder2 = self.encoder.layer1
        self.encoder3 = self.encoder.layer2
        self.encoder4 = self.encoder.layer3
        self.encoder5 = self.encoder.layer4

    def forward(self, x):
        conv1 = self.conv1(x)
        encoder2 = self.encoder2(conv1)
        encoder3 = self.encoder3(encoder2)
        encoder4 = self.encoder4(encoder3)
        encoder5 = self.encoder5(encoder4)

        return encoder2, encoder3, encoder4, encoder5


class SeResNetXtEncoders(nn.Module):
    """Wraps an SE-ResNeXt from pretrainedmodels and exposes its four stages."""
    def __init__(self, encoder_depth, pretrained='imagenet', pool0=False):
        super().__init__()

        if encoder_depth == 50:
            self.encoder = pretrainedmodels.__dict__['se_resnext50_32x4d'](num_classes=1000, pretrained=pretrained)
        elif encoder_depth == 101:
            self.encoder = pretrainedmodels.__dict__['se_resnext101_32x4d'](num_classes=1000, pretrained=pretrained)
        else:
            raise NotImplementedError('only the 50 and 101 versions of SE-ResNeXt are implemented')

        if pool0:
            self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                       self.encoder.layer0.bn1,
                                       self.encoder.layer0.relu1,
                                       # the stem max-pool is named 'pool' in pretrainedmodels' SENet
                                       self.encoder.layer0.pool)
        else:
            self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                       self.encoder.layer0.bn1,
                                       self.encoder.layer0.relu1)

        self.encoder2 = self.encoder.layer1
        self.encoder3 = self.encoder.layer2
        self.encoder4 = self.encoder.layer3
        self.encoder5 = self.encoder.layer4

    def forward(self, x):
        conv1 = self.conv1(x)
        encoder2 = self.encoder2(conv1)
        encoder3 = self.encoder3(encoder2)
        encoder4 = self.encoder4(encoder3)
        encoder5 = self.encoder5(encoder4)

        return encoder2, encoder3, encoder4, encoder5


class DenseNetEncoders(nn.Module):
    """Wraps a pretrainedmodels DenseNet; the dense blocks are the encoder
    stages, with the transition layers applied between them."""
    def __init__(self, encoder_depth, pretrained='imagenet', pool0=False):
        super().__init__()

        if encoder_depth == 121:
            self.encoder = pretrainedmodels.__dict__['densenet121'](num_classes=1000, pretrained=pretrained)
        elif encoder_depth == 161:
            self.encoder = pretrainedmodels.__dict__['densenet161'](num_classes=1000, pretrained=pretrained)
        elif encoder_depth == 169:
            self.encoder = pretrainedmodels.__dict__['densenet169'](num_classes=1000, pretrained=pretrained)
        elif encoder_depth == 201:
            self.encoder = pretrainedmodels.__dict__['densenet201'](num_classes=1000, pretrained=pretrained)
        else:
            raise NotImplementedError('only the 121, 161, 169 and 201 versions of DenseNet are implemented')

        if pool0:
            self.conv1 = nn.Sequential(self.encoder.features.conv0,
                                       self.encoder.features.norm0,
                                       self.encoder.features.relu0,
                                       self.encoder.features.pool0)
        else:
            self.conv1 = nn.Sequential(self.encoder.features.conv0,
                                       self.encoder.features.norm0,
                                       self.encoder.features.relu0)

        self.encoder2 = self.encoder.features.denseblock1
        self.transition1 = self.encoder.features.transition1
        self.encoder3 = self.encoder.features.denseblock2
        self.transition2 = self.encoder.features.transition2
        self.encoder4 = self.encoder.features.denseblock3
        self.transition3 = self.encoder.features.transition3
        self.encoder5 = self.encoder.features.denseblock4

    def forward(self, x):
        conv1 = self.conv1(x)
        encoder2 = self.encoder2(conv1)
        transition1 = self.transition1(encoder2)
        encoder3 = self.encoder3(transition1)
        transition2 = self.transition2(encoder3)
        encoder4 = self.encoder4(transition2)
        transition3 = self.transition3(encoder4)
        encoder5 = self.encoder5(transition3)

        return encoder2, encoder3, encoder4, encoder5
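
For orientation, a minimal sketch of what these encoders hand to a decoder; the chosen depth, input resolution and printed shapes are illustrative assumptions, not values from the commit:

    import torch

    # pretrained=False keeps this smoke test offline (no weight download).
    encoder = ResNetEncoders(encoder_depth=34, pretrained=False, pool0=True)
    x = torch.randn(1, 3, 128, 128)
    e2, e3, e4, e5 = encoder(x)
    # With pool0=True the stem downsamples by 4 and each later stage halves again:
    print(tuple(e2.shape))  # (1, 64, 32, 32)
    print(tuple(e3.shape))  # (1, 128, 16, 16)
    print(tuple(e4.shape))  # (1, 256, 8, 8)
    print(tuple(e5.shape))  # (1, 512, 4, 4)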
