Merge pull request #141 from whenyd/master

Apply sigmoid before calc dice in eval_net()

Former-commit-id: 32ee0fe21170e98f3edb51f8639d8b26d7ce3475
This commit is contained in:
milesial 2020-03-14 15:02:50 -07:00 committed by GitHub
commit 54ba0e5d54
2 changed files with 15 additions and 15 deletions

26
eval.py
View file

@@ -5,28 +5,28 @@ from tqdm import tqdm
from dice_loss import dice_coeff from dice_loss import dice_coeff
def eval_net(net, loader, device, n_val): def eval_net(net, loader, device):
"""Evaluation without the densecrf with the dice coefficient""" """Evaluation without the densecrf with the dice coefficient"""
net.eval() net.eval()
mask_type = torch.float32 if net.n_classes == 1 else torch.long
n_val = len(loader) # the number of batch
tot = 0 tot = 0
with tqdm(total=n_val, desc='Validation round', unit='img', leave=False) as pbar: with tqdm(total=n_val, desc='Validation round', unit='batch', leave=False) as pbar:
for batch in loader: for batch in loader:
imgs = batch['image'] imgs, true_masks = batch['image'], batch['mask']
true_masks = batch['mask']
imgs = imgs.to(device=device, dtype=torch.float32) imgs = imgs.to(device=device, dtype=torch.float32)
mask_type = torch.float32 if net.n_classes == 1 else torch.long
true_masks = true_masks.to(device=device, dtype=mask_type) true_masks = true_masks.to(device=device, dtype=mask_type)
mask_pred = net(imgs) with torch.no_grad():
mask_pred = net(imgs)
for true_mask, pred in zip(true_masks, mask_pred): if net.n_classes > 1:
tot += F.cross_entropy(mask_pred, true_masks).item()
else:
pred = torch.sigmoid(mask_pred)
pred = (pred > 0.5).float() pred = (pred > 0.5).float()
if net.n_classes > 1: tot += dice_coeff(pred, true_masks).item()
tot += F.cross_entropy(pred.unsqueeze(dim=0), true_mask.unsqueeze(dim=0)).item() pbar.update()
else:
tot += dice_coeff(pred, true_mask.squeeze(dim=1)).item()
pbar.update(imgs.shape[0])
return tot / n_val return tot / n_val

View file

@@ -35,7 +35,7 @@ def train_net(net,
n_train = len(dataset) - n_val n_train = len(dataset) - n_val
train, val = random_split(dataset, [n_train, n_val]) train, val = random_split(dataset, [n_train, n_val])
train_loader = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True) train_loader = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
val_loader = DataLoader(val, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=True) val_loader = DataLoader(val, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=True, drop_last=True)
writer = SummaryWriter(comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}') writer = SummaryWriter(comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
global_step = 0 global_step = 0
@@ -88,7 +88,7 @@ def train_net(net,
pbar.update(imgs.shape[0]) pbar.update(imgs.shape[0])
global_step += 1 global_step += 1
if global_step % (len(dataset) // (10 * batch_size)) == 0: if global_step % (len(dataset) // (10 * batch_size)) == 0:
val_score = eval_net(net, val_loader, device, n_val) val_score = eval_net(net, val_loader, device)
if net.n_classes > 1: if net.n_classes > 1:
logging.info('Validation cross entropy: {}'.format(val_score)) logging.info('Validation cross entropy: {}'.format(val_score))
writer.add_scalar('Loss/test', val_score, global_step) writer.add_scalar('Loss/test', val_score, global_step)