From 490ecf838374a913ba56535959c221ed8a58da30 Mon Sep 17 00:00:00 2001
From: milesial
Date: Mon, 30 Mar 2020 20:16:55 -0700
Subject: [PATCH] Rework of the transposed conv / bilinear up route

Former-commit-id: 07debeb9f2621b53ed513e5ab8a0307b4da57767
---
 train.py           |  4 ++--
 unet/unet_model.py | 11 ++++++-----
 unet/unet_parts.py | 15 +++++++++------
 3 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/train.py b/train.py
index 4aea941..ecad51d 100644
--- a/train.py
+++ b/train.py
@@ -154,11 +154,11 @@ if __name__ == '__main__':
     #   - For 1 class and background, use n_classes=1
     #   - For 2 classes, use n_classes=1
     #   - For N > 2 classes, use n_classes=N
-    net = UNet(n_channels=3, n_classes=1)
+    net = UNet(n_channels=3, n_classes=1, bilinear=True)
     logging.info(f'Network:\n'
                  f'\t{net.n_channels} input channels\n'
                  f'\t{net.n_classes} output channels (classes)\n'
-                 f'\t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling')
+                 f'\t{"Bilinear" if net.bilinear else "Transposed conv"} upscaling')
 
     if args.load:
         net.load_state_dict(
diff --git a/unet/unet_model.py b/unet/unet_model.py
index ee1f1bb..e5d2388 100644
--- a/unet/unet_model.py
+++ b/unet/unet_model.py
@@ -16,11 +16,12 @@ class UNet(nn.Module):
         self.down1 = Down(64, 128)
         self.down2 = Down(128, 256)
         self.down3 = Down(256, 512)
-        self.down4 = Down(512, 512)
-        self.up1 = Up(1024, 256, bilinear)
-        self.up2 = Up(512, 128, bilinear)
-        self.up3 = Up(256, 64, bilinear)
-        self.up4 = Up(128, 64, bilinear)
+        factor = 2 if bilinear else 1
+        self.down4 = Down(512, 1024 // factor)
+        self.up1 = Up(1024, 512, bilinear)
+        self.up2 = Up(512, 256, bilinear)
+        self.up3 = Up(256, 128, bilinear)
+        self.up4 = Up(128, 64 * factor, bilinear)
         self.outc = OutConv(64, n_classes)
 
     def forward(self, x):
diff --git a/unet/unet_parts.py b/unet/unet_parts.py
index 05e1c1c..daaa2da 100644
--- a/unet/unet_parts.py
+++ b/unet/unet_parts.py
@@ -8,13 +8,15 @@ import torch.nn.functional as F
 class DoubleConv(nn.Module):
     """(convolution => [BN] => ReLU) * 2"""
 
-    def __init__(self, in_channels, out_channels):
+    def __init__(self, in_channels, out_channels, mid_channels=None):
         super().__init__()
+        if not mid_channels:
+            mid_channels = out_channels
         self.double_conv = nn.Sequential(
-            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
-            nn.BatchNorm2d(out_channels),
+            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
+            nn.BatchNorm2d(mid_channels),
             nn.ReLU(inplace=True),
-            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
+            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
             nn.BatchNorm2d(out_channels),
             nn.ReLU(inplace=True)
         )
@@ -46,10 +48,11 @@ class Up(nn.Module):
         # if bilinear, use the normal convolutions to reduce the number of channels
         if bilinear:
             self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
+            self.conv = DoubleConv(in_channels, out_channels // 2, in_channels // 2)
         else:
-            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
+            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
+            self.conv = DoubleConv(in_channels, out_channels)
 
-        self.conv = DoubleConv(in_channels, out_channels)
 
     def forward(self, x1, x2):
         x1 = self.up(x1)
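
Not part of the patch: a minimal smoke-test sketch one might run after applying it, to confirm the reworked channel bookkeeping on both up routes. It assumes the repository root is on PYTHONPATH so that `from unet import UNet` resolves the same way it does in train.py, and it checks that the bilinear and the transposed conv variants each return one logit map per class at the input resolution (input side kept divisible by 16 so the four poolings need no padding fixups).

    # Smoke-test sketch, not part of the patch. Assumes the repo root is on
    # PYTHONPATH and this patch has been applied.
    import torch

    from unet import UNet

    x = torch.randn(1, 3, 64, 64)  # one RGB image, side length divisible by 16

    for bilinear in (True, False):
        net = UNet(n_channels=3, n_classes=1, bilinear=bilinear)
        net.eval()  # use BatchNorm running stats for the single-image batch
        with torch.no_grad():
            y = net(x)
        # Both routes must end at 64 channels before OutConv, so the logits
        # come out as (N, n_classes, H, W) at the input resolution.
        assert y.shape == (1, 1, 64, 64), y.shape
        print(f'bilinear={bilinear}: output shape {tuple(y.shape)}')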