update

parent 8e5c90481a
commit c60695ab1b

@@ -1,13 +1,3 @@
-"""
-@Author: Yue Wang
-@Contact: yuewangx@mit.edu
-@File: data.py
-@Time: 2018/10/13 6:21 PM
-Modified by
-@Author: Tiange Xiang
-@Contact: txia7609@uni.sydney.edu.au
-@Time: 2021/1/21 3:10 PM
-"""
 
 
 import os
@@ -1,367 +1,440 @@
-"""
-Usage:
-python main.py --model CurveNet --exp_name=demo1
-
-@Author: An Tao
-@Contact: ta19@mails.tsinghua.edu.cn
-@File: main_partseg.py
-@Time: 2019/12/31 11:17 AM
-Modified by
-@Author: Tiange Xiang
-@Contact: txia7609@uni.sydney.edu.au
-@Time: 2021/01/21 3:10 PM
-"""
 
 
 from __future__ import print_function
 import os
-import datetime
 import argparse
 import torch
-import torch.nn as nn
-import torch.nn.functional as F
 import torch.optim as optim
-from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR, MultiStepLR
-from data import ShapeNetPart
-import models as models
+from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
+from util.data_util import PartNormalDataset
+import torch.nn.functional as F
+import torch.nn as nn
+import model as models
 import numpy as np
 from torch.utils.data import DataLoader
-from util import cal_loss, IOStream
-import sklearn.metrics as metrics
+from util.util import to_categorical, compute_overall_iou, IOStream
+from tqdm import tqdm
+from collections import defaultdict
+from torch.autograd import Variable
+import random
 
 
+classes_str = ['aero','bag','cap','car','chair','ear','guitar','knife','lamp','lapt','moto','mug','Pistol','rock','stake','table']
 
-seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
-index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
 
 def _init_():
-    # fix random seed
-    if args.seed is not None:
-        torch.manual_seed(args.seed)
-        np.random.seed(args.seed)
-        torch.cuda.manual_seed_all(args.seed)
-        torch.cuda.manual_seed(args.seed)
-        torch.set_printoptions(10)
-        torch.backends.cudnn.benchmark = False
-        torch.backends.cudnn.deterministic = True
-        os.environ['PYTHONHASHSEED'] = str(args.seed)
-
-    # prepare file structures
     if not os.path.exists('checkpoints'):
         os.makedirs('checkpoints')
     if not os.path.exists('checkpoints/' + args.exp_name):
         os.makedirs('checkpoints/' + args.exp_name)
-    if not os.path.exists('checkpoints/'+args.exp_name+'/'+'models'):
-        os.makedirs('checkpoints/'+args.exp_name+'/'+'models')
 
 
-def calculate_shape_IoU(pred_np, seg_np, label, class_choice, eva=False):
-    label = label.squeeze()
-    shape_ious = []
-    category = {}
-    for shape_idx in range(seg_np.shape[0]):
-        if not class_choice:
-            start_index = index_start[label[shape_idx]]
-            num = seg_num[label[shape_idx]]
-            parts = range(start_index, start_index + num)
-        else:
-            parts = range(seg_num[label[0]])
-        part_ious = []
-        for part in parts:
-            I = np.sum(np.logical_and(pred_np[shape_idx] == part, seg_np[shape_idx] == part))
-            U = np.sum(np.logical_or(pred_np[shape_idx] == part, seg_np[shape_idx] == part))
-            if U == 0:
-                iou = 1  # If the union of groundtruth and prediction points is empty, then count part IoU as 1
-            else:
-                iou = I / float(U)
-            part_ious.append(iou)
-        shape_ious.append(np.mean(part_ious))
-        if label[shape_idx] not in category:
-            category[label[shape_idx]] = [shape_ious[-1]]
-        else:
-            category[label[shape_idx]].append(shape_ious[-1])
-
-    if eva:
-        return shape_ious, category
-    else:
-        return shape_ious
+def weight_init(m):
+    if isinstance(m, torch.nn.Linear):
+        torch.nn.init.xavier_normal_(m.weight)
+        if m.bias is not None:
+            torch.nn.init.constant_(m.bias, 0)
+    elif isinstance(m, torch.nn.Conv2d):
+        torch.nn.init.xavier_normal_(m.weight)
+        if m.bias is not None:
+            torch.nn.init.constant_(m.bias, 0)
+    elif isinstance(m, torch.nn.Conv1d):
+        torch.nn.init.xavier_normal_(m.weight)
+        if m.bias is not None:
+            torch.nn.init.constant_(m.bias, 0)
+    elif isinstance(m, torch.nn.BatchNorm2d):
+        torch.nn.init.constant_(m.weight, 1)
+        torch.nn.init.constant_(m.bias, 0)
+    elif isinstance(m, torch.nn.BatchNorm1d):
+        torch.nn.init.constant_(m.weight, 1)
+        torch.nn.init.constant_(m.bias, 0)
 
 
 def train(args, io):
-    train_dataset = ShapeNetPart(partition='trainval', num_points=args.num_points, class_choice=args.class_choice)
-    if (len(train_dataset) < 100):
-        drop_last = False
-    else:
-        drop_last = True
-    train_loader = DataLoader(train_dataset, num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=drop_last, pin_memory=True)
-    test_loader = DataLoader(ShapeNetPart(partition='test', num_points=args.num_points, class_choice=args.class_choice),
-                             num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False, pin_memory=True)
-
+    # ============= Model ===================
+    num_part = 50
     device = torch.device("cuda" if args.cuda else "cpu")
-    io.cprint("Let's use " + str(torch.cuda.device_count()) + " GPUs!")
 
-    seg_num_all = train_loader.dataset.seg_num_all
-    seg_start_index = train_loader.dataset.seg_start_index
-
-    # create model
-    model = models.__dict__[args.model]().to(device)
+    model = models.__dict__[args.model](num_part).to(device)
     io.cprint(str(model))
-    model = nn.DataParallel(model)
 
+    model.apply(weight_init)
+    model = nn.DataParallel(model)
+    print("Let's use", torch.cuda.device_count(), "GPUs!")
+
+    '''Resume or not'''
+    if args.resume:
+        state_dict = torch.load("checkpoints/%s/best_insiou_model.pth" % args.exp_name,
+                                map_location=torch.device('cpu'))['model']
+        for k in state_dict.keys():
+            if 'module' not in k:
+                from collections import OrderedDict
+                new_state_dict = OrderedDict()
+                for k in state_dict:
+                    new_state_dict['module.' + k] = state_dict[k]
+                state_dict = new_state_dict
+            break
+        model.load_state_dict(state_dict)
+
+        print("Resume training model...")
+        print(torch.load("checkpoints/%s/best_insiou_model.pth" % args.exp_name).keys())
+    else:
+        print("Training from scratch...")
 
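A note on the resume branch above: checkpoints saved from an nn.DataParallel-wrapped model carry a 'module.' prefix on every parameter key, while checkpoints saved from the bare model do not, so the loop re-adds the prefix before loading a bare checkpoint into the wrapped model. A minimal standalone illustration of that key contract (hypothetical layer, not the repository's code):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)
print(list(net.state_dict().keys()))      # ['weight', 'bias']
wrapped = nn.DataParallel(net)
print(list(wrapped.state_dict().keys()))  # ['module.weight', 'module.bias']

# Re-prefix a bare checkpoint before loading it into the wrapped model,
# mirroring the 'module.' handling in the diff:
state = {('module.' + k): v for k, v in net.state_dict().items()}
wrapped.load_state_dict(state)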
+    # =========== Dataloader =================
+    train_data = PartNormalDataset(npoints=2048, split='trainval', normalize=False)
+    print("The number of training data is:%d", len(train_data))
+
+    test_data = PartNormalDataset(npoints=2048, split='test', normalize=False)
+    print("The number of test data is:%d", len(test_data))
+
+    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers,
+                              drop_last=True)
+
+    test_loader = DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, num_workers=args.workers,
+                             drop_last=False)
+
+    # ============= Optimizer ================
     if args.use_sgd:
         print("Use SGD")
-        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
+        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=0)
     else:
         print("Use Adam")
-        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
+        opt = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
 
     if args.scheduler == 'cos':
-        if args.use_sgd:
-            eta_min = args.lr/5.0
-        else:
-            eta_min = args.lr/100.0
-        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=eta_min)
-    elif args.scheduler == 'step':
-        scheduler = MultiStepLR(opt, [140, 180], gamma=0.1)
-    criterion = cal_loss
+        print("Use CosLR")
+        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr if args.use_sgd else args.lr / 100)
+    else:
+        print("Use StepLR")
+        scheduler = StepLR(opt, step_size=args.step, gamma=0.5)
 
-    best_test_iou = 0
+    # ============= Training =================
+    best_acc = 0
+    best_class_iou = 0
+    best_instance_iou = 0
+    num_part = 50
+    num_classes = 16
+
     for epoch in range(args.epochs):
-        ####################
-        # Train
-        ####################
-        train_time_cost = datetime.datetime.now()
+        train_epoch(train_loader, model, opt, scheduler, epoch, num_part, num_classes, io)
+        test_metrics, total_per_cat_iou = test_epoch(test_loader, model, epoch, num_part, num_classes, io)
+
+        # 1. when get the best accuracy, save the model:
+        if test_metrics['accuracy'] > best_acc:
+            best_acc = test_metrics['accuracy']
+            io.cprint('Max Acc:%.5f' % best_acc)
+            state = {
+                'model': model.module.state_dict() if torch.cuda.device_count() > 1 else model.state_dict(),
+                'optimizer': opt.state_dict(), 'epoch': epoch, 'test_acc': best_acc}
+            torch.save(state, 'checkpoints/%s/best_acc_model.pth' % args.exp_name)
+
+        # 2. when get the best instance_iou, save the model:
+        if test_metrics['shape_avg_iou'] > best_instance_iou:
+            best_instance_iou = test_metrics['shape_avg_iou']
+            io.cprint('Max instance iou:%.5f' % best_instance_iou)
+            state = {
+                'model': model.module.state_dict() if torch.cuda.device_count() > 1 else model.state_dict(),
+                'optimizer': opt.state_dict(), 'epoch': epoch, 'test_instance_iou': best_instance_iou}
+            torch.save(state, 'checkpoints/%s/best_insiou_model.pth' % args.exp_name)
+
+        # 3. when get the best class_iou, save the model:
+        # first we need to calculate the average per-class iou
+        class_iou = 0
+        for cat_idx in range(16):
+            class_iou += total_per_cat_iou[cat_idx]
+        avg_class_iou = class_iou / 16
+        if avg_class_iou > best_class_iou:
+            best_class_iou = avg_class_iou
+            # print the iou of each class:
+            for cat_idx in range(16):
+                io.cprint(classes_str[cat_idx] + ' iou: ' + str(total_per_cat_iou[cat_idx]))
+            io.cprint('Max class iou:%.5f' % best_class_iou)
+            state = {
+                'model': model.module.state_dict() if torch.cuda.device_count() > 1 else model.state_dict(),
+                'optimizer': opt.state_dict(), 'epoch': epoch, 'test_class_iou': best_class_iou}
+            torch.save(state, 'checkpoints/%s/best_clsiou_model.pth' % args.exp_name)
+
+    # report best acc, ins_iou, cls_iou
+    io.cprint('Final Max Acc:%.5f' % best_acc)
+    io.cprint('Final Max instance iou:%.5f' % best_instance_iou)
+    io.cprint('Final Max class iou:%.5f' % best_class_iou)
+    # save last model
+    state = {
+        'model': model.module.state_dict() if torch.cuda.device_count() > 1 else model.state_dict(),
+        'optimizer': opt.state_dict(), 'epoch': args.epochs - 1, 'test_iou': best_instance_iou}
+    torch.save(state, 'checkpoints/%s/model_ep%d.pth' % (args.exp_name, args.epochs))
 
 
+def train_epoch(train_loader, model, opt, scheduler, epoch, num_part, num_classes, io):
     train_loss = 0.0
     count = 0.0
+    accuracy = []
+    shape_ious = 0.0
+    metrics = defaultdict(lambda: list())
     model.train()
-        train_true_cls = []
-        train_pred_cls = []
-        train_true_seg = []
-        train_pred_seg = []
-        train_label_seg = []
-        for data, label, seg in train_loader:
-            seg = seg - seg_start_index
-            label_one_hot = np.zeros((label.shape[0], 16))
-            for idx in range(label.shape[0]):
-                label_one_hot[idx, label[idx]] = 1
-            label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
-            data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
-            data = data.permute(0, 2, 1)
-            batch_size = data.size()[0]
+    for batch_id, (points, label, target, norm_plt) in tqdm(enumerate(train_loader), total=len(train_loader), smoothing=0.9):
+        batch_size, num_point, _ = points.size()
+        points, label, target, norm_plt = Variable(points.float()), Variable(label.long()), Variable(target.long()), \
+                                          Variable(norm_plt.float())
+        points = points.transpose(2, 1)
+        norm_plt = norm_plt.transpose(2, 1)
+        points, label, target, norm_plt = points.cuda(non_blocking=True), label.squeeze(1).cuda(non_blocking=True), \
+                                          target.cuda(non_blocking=True), norm_plt.cuda(non_blocking=True)
+        # target: b,n
+        seg_pred = model(points, norm_plt, to_categorical(label, num_classes))  # seg_pred: b,n,50
+        loss = F.nll_loss(seg_pred.contiguous().view(-1, num_part), target.view(-1, 1)[:, 0])
+
+        # instance iou without considering the class average at each batch_size:
+        batch_shapeious = compute_overall_iou(seg_pred, target, num_part)  # list of of current batch_iou:[iou1,iou2,...,iou#b_size]
+        # total iou of current batch in each process:
+        batch_shapeious = seg_pred.new_tensor([np.sum(batch_shapeious)], dtype=torch.float64)  # same device with seg_pred!!!
+
+        # Loss backward
+        loss = torch.mean(loss)
         opt.zero_grad()
-            seg_pred = model(data, label_one_hot)
-            seg_pred = seg_pred.permute(0, 2, 1).contiguous()
-            loss = criterion(seg_pred.view(-1, seg_num_all), seg.view(-1,1).squeeze())
         loss.backward()
-            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
         opt.step()
-            pred = seg_pred.max(dim=2)[1]  # (batch_size, num_points)
-            count += batch_size
+
+        # accuracy
+        seg_pred = seg_pred.contiguous().view(-1, num_part)  # b*n,50
+        target = target.view(-1, 1)[:, 0]  # b*n
+        pred_choice = seg_pred.contiguous().data.max(1)[1]  # b*n
+        correct = pred_choice.eq(target.contiguous().data).sum()  # torch.int64: total number of correct-predict pts
+
+        # sum
+        shape_ious += batch_shapeious.item()  # count the sum of ious in each iteration
+        count += batch_size  # count the total number of samples in each iteration
         train_loss += loss.item() * batch_size
-            seg_np = seg.cpu().numpy()  # (batch_size, num_points)
-            pred_np = pred.detach().cpu().numpy()  # (batch_size, num_points)
-            train_true_cls.append(seg_np.reshape(-1))  # (batch_size * num_points)
-            train_pred_cls.append(pred_np.reshape(-1))  # (batch_size * num_points)
-            train_true_seg.append(seg_np)
-            train_pred_seg.append(pred_np)
-            train_label_seg.append(label.reshape(-1))
+        accuracy.append(correct.item()/(batch_size * num_point))  # append the accuracy of each iteration
+        # Note: We do not need to calculate per_class iou during training
+
     if args.scheduler == 'cos':
         scheduler.step()
     elif args.scheduler == 'step':
-        if opt.param_groups[0]['lr'] > 1e-5:
+        if opt.param_groups[0]['lr'] > 0.9e-5:
             scheduler.step()
-        if opt.param_groups[0]['lr'] < 1e-5:
+        if opt.param_groups[0]['lr'] < 0.9e-5:
             for param_group in opt.param_groups:
-                param_group['lr'] = 1e-5
-        train_true_cls = np.concatenate(train_true_cls)
-        train_pred_cls = np.concatenate(train_pred_cls)
-        train_acc = metrics.accuracy_score(train_true_cls, train_pred_cls)
-        avg_per_class_acc = metrics.balanced_accuracy_score(train_true_cls, train_pred_cls)
-        train_true_seg = np.concatenate(train_true_seg, axis=0)
-        train_pred_seg = np.concatenate(train_pred_seg, axis=0)
-        train_label_seg = np.concatenate(train_label_seg)
-        train_ious = calculate_shape_IoU(train_pred_seg, train_true_seg, train_label_seg, args.class_choice)
-        train_time_cost = int((datetime.datetime.now() - train_time_cost).total_seconds())
-        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f, train iou: %.6f' % (epoch,
-                                                                                                  train_loss*1.0/count,
-                                                                                                  train_acc,
-                                                                                                  avg_per_class_acc,
-                                                                                                  np.mean(train_ious))
-        io.cprint(outstr)
-        io.cprint(f"Training time: {train_time_cost} seconds.")
+                param_group['lr'] = 0.9e-5
+    io.cprint('Learning rate: %f' % opt.param_groups[0]['lr'])
 
-        ####################
-        # Test
-        ####################
-        test_time_cost = datetime.datetime.now()
+    metrics['accuracy'] = np.mean(accuracy)
+    metrics['shape_avg_iou'] = shape_ious * 1.0 / count
+
+    outstr = 'Train %d, loss: %f, train acc: %f, train ins_iou: %f' % (epoch+1, train_loss * 1.0 / count,
+                                                                       metrics['accuracy'], metrics['shape_avg_iou'])
+    io.cprint(outstr)
 
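train_epoch above leans on two helpers imported from util.util that the diff itself never shows: to_categorical and compute_overall_iou (plus the IOStream logger). Since only their call sites appear in this commit, the following is a hedged sketch of what such helpers conventionally look like in this family of part-segmentation codebases (DGCNN/CurveNet lineage), not necessarily this repository's exact util/util.py:

import numpy as np
import torch

def to_categorical(y, num_classes):
    """One-hot encode a (batch,) tensor of category ids into (batch, num_classes)."""
    new_y = torch.eye(num_classes)[y.cpu().data.numpy(), ]
    if y.is_cuda:
        return new_y.cuda(non_blocking=True)
    return new_y

def compute_overall_iou(pred, target, num_part):
    """Per-sample IoU, averaged over the part classes present in each sample.

    pred:   (batch, num_points, num_part) log-probabilities
    target: (batch, num_points) ground-truth part labels
    Returns a Python list of batch_size IoU values, matching the [b] comments in the diff.
    """
    shape_ious = []
    pred = pred.max(dim=2)[1]  # (batch, num_points) predicted part id per point
    pred_np = pred.cpu().data.numpy()
    target_np = target.cpu().data.numpy()
    for shape_idx in range(pred.size(0)):
        part_ious = []
        for part in range(num_part):
            I = np.sum(np.logical_and(pred_np[shape_idx] == part, target_np[shape_idx] == part))
            U = np.sum(np.logical_or(pred_np[shape_idx] == part, target_np[shape_idx] == part))
            F = np.sum(target_np[shape_idx] == part)
            if F != 0:  # only score parts that actually occur in the ground truth
                part_ious.append(I / float(U))
        shape_ious.append(np.mean(part_ious))
    return shape_ious

class IOStream():
    """Print to stdout and append the same line to a log file."""
    def __init__(self, path):
        self.f = open(path, 'a')
    def cprint(self, text):
        print(text)
        self.f.write(text + '\n')
        self.f.flush()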
+def test_epoch(test_loader, model, epoch, num_part, num_classes, io):
     test_loss = 0.0
     count = 0.0
+    accuracy = []
+    shape_ious = 0.0
+    final_total_per_cat_iou = np.zeros(16).astype(np.float32)
+    final_total_per_cat_seen = np.zeros(16).astype(np.int32)
+    metrics = defaultdict(lambda: list())
     model.eval()
-        test_true_cls = []
-        test_pred_cls = []
-        test_true_seg = []
-        test_pred_seg = []
-        test_label_seg = []
-        for data, label, seg in test_loader:
-            seg = seg - seg_start_index
-            label_one_hot = np.zeros((label.shape[0], 16))
-            for idx in range(label.shape[0]):
-                label_one_hot[idx, label[idx]] = 1
-            label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
-            data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
-            data = data.permute(0, 2, 1)
-            batch_size = data.size()[0]
-            seg_pred = model(data, label_one_hot)
-            seg_pred = seg_pred.permute(0, 2, 1).contiguous()
-            loss = criterion(seg_pred.view(-1, seg_num_all), seg.view(-1,1).squeeze())
-            pred = seg_pred.max(dim=2)[1]
-            count += batch_size
+    # label_size: b, means each sample has one corresponding class
+    for batch_id, (points, label, target, norm_plt) in tqdm(enumerate(test_loader), total=len(test_loader), smoothing=0.9):
+        batch_size, num_point, _ = points.size()
+        points, label, target, norm_plt = Variable(points.float()), Variable(label.long()), Variable(target.long()), \
+                                          Variable(norm_plt.float())
+        points = points.transpose(2, 1)
+        norm_plt = norm_plt.transpose(2, 1)
+        points, label, target, norm_plt = points.cuda(non_blocking=True), label.squeeze(1).cuda(non_blocking=True), \
+                                          target.cuda(non_blocking=True), norm_plt.cuda(non_blocking=True)
+        seg_pred = model(points, norm_plt, to_categorical(label, num_classes))  # b,n,50
+
+        # instance iou without considering the class average at each batch_size:
+        batch_shapeious = compute_overall_iou(seg_pred, target, num_part)  # [b]
+        # per category iou at each batch_size:
+        for shape_idx in range(seg_pred.size(0)):  # sample_idx
+            cur_gt_label = label[shape_idx]  # label[sample_idx], denotes current sample belongs to which cat
+            final_total_per_cat_iou[cur_gt_label] += batch_shapeious[shape_idx]  # add the iou belongs to this cat
+            final_total_per_cat_seen[cur_gt_label] += 1  # count the number of this cat is chosen
+
+        # total iou of current batch in each process:
+        batch_ious = seg_pred.new_tensor([np.sum(batch_shapeious)], dtype=torch.float64)  # same device with seg_pred!!!
+
+        # prepare seg_pred and target for later calculating loss and acc:
+        seg_pred = seg_pred.contiguous().view(-1, num_part)
+        target = target.view(-1, 1)[:, 0]
+        # Loss
+        loss = F.nll_loss(seg_pred.contiguous(), target.contiguous())
+
+        # accuracy:
+        pred_choice = seg_pred.data.max(1)[1]  # b*n
+        correct = pred_choice.eq(target.data).sum()  # torch.int64: total number of correct-predict pts
+
+        loss = torch.mean(loss)
+        shape_ious += batch_ious.item()  # count the sum of ious in each iteration
+        count += batch_size  # count the total number of samples in each iteration
         test_loss += loss.item() * batch_size
-            seg_np = seg.cpu().numpy()
-            pred_np = pred.detach().cpu().numpy()
-            test_true_cls.append(seg_np.reshape(-1))
-            test_pred_cls.append(pred_np.reshape(-1))
-            test_true_seg.append(seg_np)
-            test_pred_seg.append(pred_np)
-            test_label_seg.append(label.reshape(-1))
-        test_true_cls = np.concatenate(test_true_cls)
-        test_pred_cls = np.concatenate(test_pred_cls)
-        test_acc = metrics.accuracy_score(test_true_cls, test_pred_cls)
-        avg_per_class_acc = metrics.balanced_accuracy_score(test_true_cls, test_pred_cls)
-        test_true_seg = np.concatenate(test_true_seg, axis=0)
-        test_pred_seg = np.concatenate(test_pred_seg, axis=0)
-        test_label_seg = np.concatenate(test_label_seg)
-        test_ious = calculate_shape_IoU(test_pred_seg, test_true_seg, test_label_seg, args.class_choice)
-        test_time_cost = int((datetime.datetime.now() - test_time_cost).total_seconds())
-        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f, test iou: %.6f, best iou %.6f' % (epoch,
-                                                                                                             test_loss*1.0/count,
-                                                                                                             test_acc,
-                                                                                                             avg_per_class_acc,
-                                                                                                             np.mean(test_ious), best_test_iou)
+        accuracy.append(correct.item() / (batch_size * num_point))  # append the accuracy of each iteration
+
+    for cat_idx in range(16):
+        if final_total_per_cat_seen[cat_idx] > 0:  # indicating this cat is included during previous iou appending
+            final_total_per_cat_iou[cat_idx] = final_total_per_cat_iou[cat_idx] / final_total_per_cat_seen[cat_idx]  # avg class iou across all samples
+
+    metrics['accuracy'] = np.mean(accuracy)
+    metrics['shape_avg_iou'] = shape_ious * 1.0 / count
+
+    outstr = 'Test %d, loss: %f, test acc: %f test ins_iou: %f' % (epoch + 1, test_loss * 1.0 / count,
+                                                                   metrics['accuracy'], metrics['shape_avg_iou'])
     io.cprint(outstr)
-        io.cprint(f"Testing time: {test_time_cost} seconds.")
-        if np.mean(test_ious) >= best_test_iou:
-            best_test_iou = np.mean(test_ious)
-            torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)
+
+    return metrics, final_total_per_cat_iou
 
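Both epoch loops above unpack PartNormalDataset batches as (points, label, target, norm_plt). The diff only implies the tensor shapes through the call sites (points.size(), label.squeeze(1), the transposes), so here is a hypothetical stub documenting the contract the loops assume; names and sizes are inferred, not taken from util/data_util.py:

import torch
from torch.utils.data import Dataset

class FakePartNormalDataset(Dataset):
    """Stand-in with the tensor shapes the training/testing loops expect."""
    def __init__(self, npoints=2048, nsamples=4):
        self.npoints, self.nsamples = npoints, nsamples
    def __len__(self):
        return self.nsamples
    def __getitem__(self, idx):
        points = torch.rand(self.npoints, 3)            # xyz, transposed to (3, n) before the model
        label = torch.randint(0, 16, (1,))              # object category id, squeezed then one-hot encoded
        target = torch.randint(0, 50, (self.npoints,))  # per-point part label in the global 0..49 space
        norm_plt = torch.rand(self.npoints, 3)          # per-point normals, transposed like points
        return points, label, target, norm_plt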
 def test(args, io):
-    test_loader = DataLoader(ShapeNetPart(partition='test', num_points=args.num_points, class_choice=args.class_choice),
-                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)
+    # Dataloader
+    test_data = PartNormalDataset(npoints=2048, split='test', normalize=False)
+    print("The number of test data is:%d", len(test_data))
 
-    device = torch.device("cuda" if args.cuda else "cpu")
+    test_loader = DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, num_workers=args.workers,
+                             drop_last=False)
 
     # Try to load models
-    seg_start_index = test_loader.dataset.seg_start_index
-    model = models.__dict__[args.model]().to(device)
-    model = nn.DataParallel(model)
-    model.load_state_dict(torch.load(args.model_path))
+    num_part = 50
+    device = torch.device("cuda" if args.cuda else "cpu")
 
-    model = model.eval()
-    test_acc = 0.0
-    test_true_cls = []
-    test_pred_cls = []
-    test_true_seg = []
-    test_pred_seg = []
-    test_label_seg = []
-    category = {}
-    for data, label, seg in test_loader:
-        seg = seg - seg_start_index
-        label_one_hot = np.zeros((label.shape[0], 16))
-        for idx in range(label.shape[0]):
-            label_one_hot[idx, label[idx]] = 1
-        label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
-        data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
-        data = data.permute(0, 2, 1)
-        seg_pred = model(data, label_one_hot)
-        seg_pred = seg_pred.permute(0, 2, 1).contiguous()
-        pred = seg_pred.max(dim=2)[1]
-        seg_np = seg.cpu().numpy()
-        pred_np = pred.detach().cpu().numpy()
-        test_true_cls.append(seg_np.reshape(-1))
-        test_pred_cls.append(pred_np.reshape(-1))
-        test_true_seg.append(seg_np)
-        test_pred_seg.append(pred_np)
-        test_label_seg.append(label.reshape(-1))
+    model = models.__dict__[args.model](num_part).to(device)
+    io.cprint(str(model))
 
-    test_true_cls = np.concatenate(test_true_cls)
-    test_pred_cls = np.concatenate(test_pred_cls)
-    test_acc = metrics.accuracy_score(test_true_cls, test_pred_cls)
-    avg_per_class_acc = metrics.balanced_accuracy_score(test_true_cls, test_pred_cls)
-    test_true_seg = np.concatenate(test_true_seg, axis=0)
-    test_pred_seg = np.concatenate(test_pred_seg, axis=0)
-    test_label_seg = np.concatenate(test_label_seg)
-    test_ious,category = calculate_shape_IoU(test_pred_seg, test_true_seg, test_label_seg, args.class_choice, eva=True)
-    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f, test iou: %.6f' % (test_acc,
-                                                                             avg_per_class_acc,
-                                                                             np.mean(test_ious))
+    from collections import OrderedDict
+    state_dict = torch.load("checkpoints/%s/best_%s_model.pth" % (args.exp_name, args.model_type),
+                            map_location=torch.device('cpu'))['model']
+
+    new_state_dict = OrderedDict()
+    for layer in state_dict:
+        new_state_dict[layer.replace('module.', '')] = state_dict[layer]
+    model.load_state_dict(new_state_dict)
+
+    model.eval()
+    num_part = 50
+    num_classes = 16
+    metrics = defaultdict(lambda: list())
+    hist_acc = []
+    shape_ious = []
+    total_per_cat_iou = np.zeros((16)).astype(np.float32)
+    total_per_cat_seen = np.zeros((16)).astype(np.int32)
+
+    for batch_id, (points, label, target, norm_plt) in tqdm(enumerate(test_loader), total=len(test_loader), smoothing=0.9):
+        batch_size, num_point, _ = points.size()
+        points, label, target, norm_plt = Variable(points.float()), Variable(label.long()), Variable(target.long()), Variable(norm_plt.float())
+        points = points.transpose(2, 1)
+        norm_plt = norm_plt.transpose(2, 1)
+        points, label, target, norm_plt = points.cuda(non_blocking=True), label.squeeze().cuda(
+            non_blocking=True), target.cuda(non_blocking=True), norm_plt.cuda(non_blocking=True)
+
+        with torch.no_grad():
+            seg_pred = model(points, norm_plt, to_categorical(label, num_classes))  # b,n,50
+
+        # instance iou without considering the class average at each batch_size:
+        batch_shapeious = compute_overall_iou(seg_pred, target, num_part)  # [b]
+        shape_ious += batch_shapeious  # iou +=, equals to .append
+
+        # per category iou at each batch_size:
+        for shape_idx in range(seg_pred.size(0)):  # sample_idx
+            cur_gt_label = label[shape_idx]  # label[sample_idx]
+            total_per_cat_iou[cur_gt_label] += batch_shapeious[shape_idx]
+            total_per_cat_seen[cur_gt_label] += 1
+
+        # accuracy:
+        seg_pred = seg_pred.contiguous().view(-1, num_part)
+        target = target.view(-1, 1)[:, 0]
+        pred_choice = seg_pred.data.max(1)[1]
+        correct = pred_choice.eq(target.data).cpu().sum()
+        metrics['accuracy'].append(correct.item() / (batch_size * num_point))
+
+    hist_acc += metrics['accuracy']
+    metrics['accuracy'] = np.mean(hist_acc)
+    metrics['shape_avg_iou'] = np.mean(shape_ious)
+    for cat_idx in range(16):
+        if total_per_cat_seen[cat_idx] > 0:
+            total_per_cat_iou[cat_idx] = total_per_cat_iou[cat_idx] / total_per_cat_seen[cat_idx]
+
+    # First we need to calculate the iou of each class and the avg class iou:
+    class_iou = 0
+    for cat_idx in range(16):
+        class_iou += total_per_cat_iou[cat_idx]
+        io.cprint(classes_str[cat_idx] + ' iou: ' + str(total_per_cat_iou[cat_idx]))  # print the iou of each class
+    avg_class_iou = class_iou / 16
+    outstr = 'Test :: test acc: %f test class mIOU: %f, test instance mIOU: %f' % (metrics['accuracy'], avg_class_iou, metrics['shape_avg_iou'])
     io.cprint(outstr)
-    results = []
-    for key in category.keys():
-        results.append((int(key), np.mean(category[key]), len(category[key])))
-    results.sort(key=lambda x:x[0])
-    for re in results:
-        io.cprint('idx: %d mIoU: %.3f num: %d' % (re[0], re[1], re[2]))
 
 
 if __name__ == "__main__":
     # Training settings
-    parser = argparse.ArgumentParser(description='Point Cloud Part Segmentation')
-    parser.add_argument('--model', type=str, default='CurveNet')
-    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
+    parser = argparse.ArgumentParser(description='3D Shape Part Segmentation')
+    parser.add_argument('--model', type=str, default='PointMLP1')
+    parser.add_argument('--exp_name', type=str, default='demo1', metavar='N',
                         help='Name of the experiment')
-    parser.add_argument('--dataset', type=str, default='shapenetpart', metavar='N',
-                        choices=['shapenetpart'])
-    parser.add_argument('--class_choice', type=str, default=None, metavar='N',
-                        choices=['airplane', 'bag', 'cap', 'car', 'chair',
-                                 'earphone', 'guitar', 'knife', 'lamp', 'laptop',
-                                 'motor', 'mug', 'pistol', 'rocket', 'skateboard', 'table'])
     parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                         help='Size of batch)')
-    parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
+    parser.add_argument('--test_batch_size', type=int, default=32, metavar='batch_size',
                         help='Size of batch)')
-    parser.add_argument('--epochs', type=int, default=200, metavar='N',
+    parser.add_argument('--epochs', type=int, default=350, metavar='N',
                         help='number of episode to train')
-    parser.add_argument('--seed', type=int)
-    parser.add_argument('--use_sgd', type=bool, default=True,
+    parser.add_argument('--use_sgd', type=bool, default=False,
                         help='Use SGD')
-    parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',
-                        help='learning rate (default: 0.001, 0.1 if using sgd)')
+    parser.add_argument('--scheduler', type=str, default='step',
+                        help='lr scheduler')
+    parser.add_argument('--step', type=int, default=40,
+                        help='lr decay step')
+    parser.add_argument('--lr', type=float, default=0.003, metavar='LR',
+                        help='learning rate')
     parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                         help='SGD momentum (default: 0.9)')
-    parser.add_argument('--scheduler', type=str, default='step', metavar='N',
-                        choices=['cos', 'step'],
-                        help='Scheduler to use, [cos, step]')
     parser.add_argument('--no_cuda', type=bool, default=False,
                         help='enables CUDA training')
+    parser.add_argument('--manual_seed', type=int, metavar='S',
+                        help='random seed (default: 1)')
     parser.add_argument('--eval', type=bool, default=False,
                         help='evaluate the model')
     parser.add_argument('--num_points', type=int, default=2048,
                         help='num of points to use')
-    parser.add_argument('--model_path', type=str, default='', metavar='N',
-                        help='Pretrained model path')
+    parser.add_argument('--workers', type=int, default=12)
+    parser.add_argument('--resume', type=bool, default=False,
+                        help='Resume training or not')
+    parser.add_argument('--model_type', type=str, default='insiou',
+                        help='choose to test the best insiou/clsiou/acc model (options: insiou, clsiou, acc)')
 
     args = parser.parse_args()
-    time_str = str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S'))
-    if args.exp_name is None:
-        args.exp_name = time_str
     args.exp_name = args.model+"_"+args.exp_name
 
     _init_()
 
-    if args.eval:
-        io = IOStream('checkpoints/' + args.exp_name + '/eval.log')
+    if not args.eval:
+        io = IOStream('checkpoints/' + args.exp_name + '/%s_train.log' % (args.exp_name))
     else:
-        io = IOStream('checkpoints/' + args.exp_name + '/run.log')
+        io = IOStream('checkpoints/' + args.exp_name + '/%s_test.log' % (args.exp_name))
     io.cprint(str(args))
-    io.cprint('random seed is: ' + str(args.seed))
+
+    if args.manual_seed is not None:
+        random.seed(args.manual_seed)
+        np.random.seed(args.manual_seed)
+        torch.manual_seed(args.manual_seed)
 
     args.cuda = not args.no_cuda and torch.cuda.is_available()
 
     if args.cuda:
-        io.cprint(
-            'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
+        io.cprint('Using GPU')
+        if args.manual_seed is not None:
+            torch.cuda.manual_seed(args.manual_seed)
+            torch.cuda.manual_seed_all(args.manual_seed)
     else:
         io.cprint('Using CPU')
 
     if not args.eval:
         train(args, io)
     else:
-        with torch.no_grad():
-            test(args, io)
+        test(args, io)
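The rewritten script resolves the network by name: `import model as models` plus `models.__dict__[args.model](num_part)` means model.py is expected to expose a callable named by --model (default PointMLP1) that takes the number of part classes and returns per-point log-probabilities. A hedged sketch of that contract with a hypothetical stub, not this repository's actual model.py:

# model.py would provide something shaped like this (hypothetical stub):
import torch.nn as nn

class PointMLP1(nn.Module):
    def __init__(self, num_part=50):
        super().__init__()
        self.num_part = num_part

    def forward(self, points, norm_plt, cls_one_hot):
        # expected to return log-probabilities of shape (b, n, num_part),
        # consumed by F.nll_loss and compute_overall_iou above
        raise NotImplementedError

# main.py then instantiates it by name:
# model = models.__dict__[args.model](num_part)  # e.g. models.__dict__['PointMLP1'](50)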
@@ -1,9 +1,3 @@
-"""
-@Author: Yue Wang
-@Contact: yuewangx@mit.edu
-@File: util
-@Time: 4/5/19 3:47 PM
-"""
 
 
 import numpy as np