import argparse
import logging
import os
import sys

import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm

from eval import eval_net
from unet import UNet

from torch.utils.tensorboard import SummaryWriter
from utils.dataset import BasicDataset
from torch.utils.data import DataLoader, random_split

dir_img = 'data/imgs/'
dir_mask = 'data/masks/'
dir_checkpoint = 'checkpoints/'
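# BasicDataset (utils/dataset.py) is expected to pair each image file in
# dir_img with the mask of the same base name in dir_mask; adjust these
# paths to match your own data layout.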


def train_net(net,
              device,
              epochs=5,
              batch_size=1,
              lr=0.001,
              val_percent=0.1,
              save_cp=True,
              img_scale=0.5):
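    """Train `net` on the dataset under dir_img/dir_mask.

    Splits the data into train/validation sets, logs losses, histograms and
    sample predictions to TensorBoard, and optionally saves a checkpoint
    after every epoch.
    """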
    dataset = BasicDataset(dir_img, dir_mask, img_scale)
    n_val = int(len(dataset) * val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
    val_loader = DataLoader(val, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=True, drop_last=True)
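    # pin_memory speeds up host-to-GPU copies; drop_last=True on the
    # validation loader discards a trailing partial batch, presumably so a
    # smaller final batch cannot skew the batch-averaged validation score.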

    writer = SummaryWriter(comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
    global_step = 0

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Training size:   {n_train}
        Validation size: {n_val}
        Checkpoints:     {save_cp}
        Device:          {device.type}
        Images scaling:  {img_scale}
    ''')

    optimizer = optim.RMSprop(net.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min' if net.n_classes > 1 else 'max', patience=2)
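    # ReduceLROnPlateau watches the validation score computed below:
    # 'min' for multi-class (cross entropy, lower is better),
    # 'max' for binary (Dice coefficient, higher is better).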
    if net.n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()
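    # Both losses consume raw logits: CrossEntropyLoss applies log-softmax
    # internally and BCEWithLogitsLoss applies the sigmoid internally.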

    for epoch in range(epochs):
        net.train()

        epoch_loss = 0
        with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
            for batch in train_loader:
                imgs = batch['image']
                true_masks = batch['mask']
                assert imgs.shape[1] == net.n_channels, \
                    f'Network has been defined with {net.n_channels} input channels, ' \
                    f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
                    'the images are loaded correctly.'

                imgs = imgs.to(device=device, dtype=torch.float32)
                mask_type = torch.float32 if net.n_classes == 1 else torch.long
                true_masks = true_masks.to(device=device, dtype=mask_type)
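                # CrossEntropyLoss requires integer class indices (long);
                # BCEWithLogitsLoss requires float targets.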

                masks_pred = net(imgs)
                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()
                writer.add_scalar('Loss/train', loss.item(), global_step)

                pbar.set_postfix(**{'loss (batch)': loss.item()})

                optimizer.zero_grad()
                loss.backward()
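                # Clip each gradient element to [-0.1, 0.1] before stepping,
                # a cheap guard against exploding gradients.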
                nn.utils.clip_grad_value_(net.parameters(), 0.1)
                optimizer.step()

                pbar.update(imgs.shape[0])
                global_step += 1
                # Validate roughly 10 times per epoch; the guard avoids a
                # zero interval (ZeroDivisionError) on very small datasets.
                division_step = len(dataset) // (10 * batch_size)
                if division_step > 0 and global_step % division_step == 0:
                    for tag, value in net.named_parameters():
                        tag = tag.replace('.', '/')
                        writer.add_histogram('weights/' + tag, value.data.cpu().numpy(), global_step)
                        writer.add_histogram('grads/' + tag, value.grad.data.cpu().numpy(), global_step)
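                    # eval_net (defined in eval.py) is assumed to switch the
                    # model to eval mode while scoring and back afterwards.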
                    val_score = eval_net(net, val_loader, device)
                    scheduler.step(val_score)
                    writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], global_step)

                    if net.n_classes > 1:
                        logging.info('Validation cross entropy: {}'.format(val_score))
                        writer.add_scalar('Loss/test', val_score, global_step)
                    else:
                        logging.info('Validation Dice Coeff: {}'.format(val_score))
                        writer.add_scalar('Dice/test', val_score, global_step)

                    writer.add_images('images', imgs, global_step)
                    if net.n_classes == 1:
                        writer.add_images('masks/true', true_masks, global_step)
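                        # Threshold the sigmoid probabilities at 0.5 to render
                        # a hard binary mask; this is for visualisation only,
                        # the loss above consumed the raw logits.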
                        writer.add_images('masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)

        if save_cp:
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                pass
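            # Checkpoints hold only the state_dict (weights); a UNet with the
            # same n_channels/n_classes must be re-instantiated to load one.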
            torch.save(net.state_dict(),
                       dir_checkpoint + f'CP_epoch{epoch + 1}.pth')
            logging.info(f'Checkpoint {epoch + 1} saved!')

    writer.close()


def get_args():
    parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-e', '--epochs', metavar='E', type=int, default=5,
                        help='Number of epochs', dest='epochs')
    parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=1,
                        help='Batch size', dest='batchsize')
    parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=0.1,
                        help='Learning rate', dest='lr')
    parser.add_argument('-f', '--load', dest='load', type=str, default=False,
                        help='Load model from a .pth file')
    parser.add_argument('-s', '--scale', dest='scale', type=float, default=0.5,
                        help='Downscaling factor of the images')
    parser.add_argument('-v', '--validation', dest='val', type=float, default=10.0,
                        help='Percent of the data that is used as validation (0-100)')

    return parser.parse_args()
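

# Typical invocation, using the flags defined in get_args() above:
#   python train.py --epochs 5 --batch-size 2 --learning-rate 0.001 --scale 0.5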


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    args = get_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')

    # Change here to adapt to your data
    # n_channels=3 for RGB images
    # n_classes is the number of probabilities you want to get per pixel
    #   - For 1 class and background, use n_classes=1
    #   - For 2 classes (one vs. the other), n_classes=1 also suffices
    #   - For N > 2 classes, use n_classes=N
    net = UNet(n_channels=3, n_classes=1)
    logging.info(f'Network:\n'
                 f'\t{net.n_channels} input channels\n'
                 f'\t{net.n_classes} output channels (classes)\n'
                 f'\t{"Bilinear" if net.bilinear else "Transposed conv"} upscaling')

    if args.load:
        net.load_state_dict(
            torch.load(args.load, map_location=device)
        )
        logging.info(f'Model loaded from {args.load}')

    net.to(device=device)
    # faster convolutions, but more memory
    # (requires: import torch.backends.cudnn as cudnn)
    # cudnn.benchmark = True

    try:
        train_net(net=net,
                  epochs=args.epochs,
                  batch_size=args.batchsize,
                  lr=args.lr,
                  device=device,
                  img_scale=args.scale,
                  val_percent=args.val / 100)
    except KeyboardInterrupt:
        torch.save(net.state_dict(), 'INTERRUPTED.pth')
        logging.info('Saved interrupt')
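        # sys.exit raises SystemExit; falling back to os._exit(0) forces an
        # immediate exit even if cleanup (e.g. worker threads) would block.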
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)