Corrections

HuguesTHOMAS 2020-04-24 12:19:12 -04:00
parent 5eb4482209
commit f4da047017
4 changed files with 1 addition and 1054 deletions


@ -1,502 +0,0 @@
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Class handling NCLT dataset.
# Implements a Dataset, a Sampler, and a collate_fn
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Common libs
import sys
import struct
import scipy.interpolate
import time
import numpy as np
import pickle
import torch
import yaml
#from mayavi import mlab
from multiprocessing import Lock
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# OS functions
from os import listdir, makedirs
from os.path import exists, join, isdir, getsize
# Dataset parent class
from datasets.common import *
from torch.utils.data import Sampler, get_worker_info
from utils.mayavi_visu import *
from utils.metrics import fast_confusion
from datasets.common import grid_subsampling
from utils.config import bcolors
def ssc_to_homo(ssc, ssc_in_radians=True):
    # Convert 6-DOF ssc coordinate transformation to 4x4 homogeneous matrix
    # transformation

    if ssc.ndim == 1:
        reduce = True
        ssc = np.expand_dims(ssc, 0)
    else:
        reduce = False

    if not ssc_in_radians:
        ssc[:, 3:] = np.pi / 180.0 * ssc[:, 3:]

    sr = np.sin(ssc[:, 3])
    cr = np.cos(ssc[:, 3])

    sp = np.sin(ssc[:, 4])
    cp = np.cos(ssc[:, 4])

    sh = np.sin(ssc[:, 5])
    ch = np.cos(ssc[:, 5])
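
    # The rotation entries filled in below correspond to R = Rz(heading) @ Ry(pitch) @ Rx(roll),
    # i.e. ssc[:, 3:] is interpreted as (roll, pitch, heading) Euler angles applied in x, y, z order.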
    H = np.zeros((ssc.shape[0], 4, 4))

    H[:, 0, 0] = ch*cp
    H[:, 0, 1] = -sh*cr + ch*sp*sr
    H[:, 0, 2] = sh*sr + ch*sp*cr

    H[:, 1, 0] = sh*cp
    H[:, 1, 1] = ch*cr + sh*sp*sr
    H[:, 1, 2] = -ch*sr + sh*sp*cr

    H[:, 2, 0] = -sp
    H[:, 2, 1] = cp*sr
    H[:, 2, 2] = cp*cr

    H[:, 0, 3] = ssc[:, 0]
    H[:, 1, 3] = ssc[:, 1]
    H[:, 2, 3] = ssc[:, 2]

    H[:, 3, 3] = 1

    if reduce:
        H = np.squeeze(H)

    return H
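
# Example call (same one used in merge_day_pointclouds() below, with the NCLT body/velodyne
# calibration expressed in meters and degrees):
#   x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703])
#   H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False)   # -> (4, 4) homogeneous matrix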


def verify_magic(s):
    magic = 44444
    m = struct.unpack('<HHHH', s)
    return len(m) >= 4 and m[0] == magic and m[1] == magic and m[2] == magic and m[3] == magic
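
# Each packet of velodyne_hits.bin, as parsed by test_read_hits() below, starts with a 24-byte
# header (8-byte magic, uint32 number of hits, uint64 utime, 4 bytes of padding) followed by
# 8 bytes per hit (uint16 x, y, z, then uint8 intensity and uint8 laser id).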


def test_read_hits():

    data_path = '../../Data/NCLT'
    velo_folder = 'velodyne_data'
    day = '2012-01-08'

    hits_path = join(data_path, velo_folder, day, 'velodyne_hits.bin')

    all_utimes = []
    all_hits = []
    all_ints = []

    num_bytes = getsize(hits_path)
    current_bytes = 0

    with open(hits_path, 'rb') as f_bin:

        total_hits = 0
        first_utime = -1
        last_utime = -1

        while True:

            magic = f_bin.read(8)
            if magic == b'':
                break

            if not verify_magic(magic):
                print('Could not verify magic')

            num_hits = struct.unpack('<I', f_bin.read(4))[0]
            utime = struct.unpack('<Q', f_bin.read(8))[0]

            # Do not convert padding (it is an int always equal to zero)
            padding = f_bin.read(4)

            total_hits += num_hits

            if first_utime == -1:
                first_utime = utime
            last_utime = utime

            hits = []
            ints = []

            for i in range(num_hits):
                x = struct.unpack('<H', f_bin.read(2))[0]
                y = struct.unpack('<H', f_bin.read(2))[0]
                z = struct.unpack('<H', f_bin.read(2))[0]
                intensity = struct.unpack('B', f_bin.read(1))[0]
                laser_id = struct.unpack('B', f_bin.read(1))[0]
                hits += [[x, y, z]]
                ints += [intensity]

            utimes = np.full((num_hits,), utime - first_utime, dtype=np.int32)
            ints = np.array(ints, dtype=np.uint8)
            hits = np.array(hits, dtype=np.float32)

            # Convert raw uint16 coordinates to meters (0.005 m per tick, -100 m offset)
            hits *= 0.005
            hits += -100.0

            all_utimes.append(utimes)
            all_hits.append(hits)
            all_ints.append(ints)

            # Stop early after reading about 0.1% of the file (this is only a test function)
            if 100 * current_bytes / num_bytes > 0.1:
                break

            current_bytes += 24 + 8 * num_hits

            print('{:d}/{:d} => {:.1f}%'.format(current_bytes, num_bytes, 100 * current_bytes / num_bytes))

    all_utimes = np.hstack(all_utimes)
    all_hits = np.vstack(all_hits)
    all_ints = np.hstack(all_ints)

    write_ply('test_hits',
              [all_hits, all_ints, all_utimes],
              ['x', 'y', 'z', 'intensity', 'utime'])

    print("Read %d total hits from %ld to %ld" % (total_hits, first_utime, last_utime))

    return 0


def frames_to_ply(show_frames=False):

    # In files
    data_path = '../../Data/NCLT'
    velo_folder = 'velodyne_data'

    days = np.sort([d for d in listdir(join(data_path, velo_folder))])

    for day in days:

        # Out files
        ply_folder = join(data_path, 'frames_ply', day)
        if not exists(ply_folder):
            makedirs(ply_folder)

        day_path = join(data_path, velo_folder, day, 'velodyne_sync')
        f_names = np.sort([f for f in listdir(day_path) if f[-4:] == '.bin'])
        N = len(f_names)
        print('Reading', N, 'files')

        for f_i, f_name in enumerate(f_names):

            ply_name = join(ply_folder, f_name[:-4] + '.ply')
            if exists(ply_name):
                continue

            t1 = time.time()

            hits = []
            ints = []

            with open(join(day_path, f_name), 'rb') as f_bin:

                while True:

                    x_str = f_bin.read(2)

                    # End of file
                    if x_str == b'':
                        break

                    x = struct.unpack('<H', x_str)[0]
                    y = struct.unpack('<H', f_bin.read(2))[0]
                    z = struct.unpack('<H', f_bin.read(2))[0]
                    intensity = struct.unpack('B', f_bin.read(1))[0]
                    laser_id = struct.unpack('B', f_bin.read(1))[0]

                    hits += [[x, y, z]]
                    ints += [intensity]

            ints = np.array(ints, dtype=np.uint8)
            hits = np.array(hits, dtype=np.float32)
            hits *= 0.005
            hits += -100.0

            write_ply(ply_name,
                      [hits, ints],
                      ['x', 'y', 'z', 'intensity'])

            t2 = time.time()
            print('File {:s} {:d}/{:d} Done in {:.1f}s'.format(f_name, f_i, N, t2 - t1))

            if show_frames:
                fig = plt.figure()
                ax = fig.add_subplot(111, projection='3d')
                ax.scatter(hits[:, 0], hits[:, 1], -hits[:, 2], c=-hits[:, 2], s=5, linewidths=0)
                plt.show()

    return 0


def merge_day_pointclouds(show_day_trajectory=False, only_SLAM_nodes=False):
    """
    Recreate the whole day point cloud thanks to the gt poses
    Generate gt_annotation of mobile objects
    """

    # In files
    data_path = '../../Data/NCLT'
    gt_folder = 'ground_truth'
    cov_folder = 'ground_truth_cov'

    # Transformation from body to velodyne frame (from NCLT paper)
    x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703])
    H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False)
    H_velo_body = np.linalg.inv(H_body_velo)

    x_body_lb3 = np.array([0.035, 0.002, -1.23, -179.93, -0.23, 0.50])
    H_body_lb3 = ssc_to_homo(x_body_lb3, ssc_in_radians=False)
    H_lb3_body = np.linalg.inv(H_body_lb3)

    # Get gt files and days
    gt_files = np.sort([gt_f for gt_f in listdir(join(data_path, gt_folder)) if gt_f[-4:] == '.csv'])
    cov_files = np.sort([cov_f for cov_f in listdir(join(data_path, cov_folder)) if cov_f[-4:] == '.csv'])
    days = [d[:-4].split('_')[1] for d in gt_files]

    # Load all gt poses
    print('\nLoading days groundtruth poses...')
    t0 = time.time()
    gt_H = []
    gt_t = []

    for d, gt_f in enumerate(gt_files):

        t1 = time.time()

        gt_pkl_file = join(data_path, gt_folder, gt_f[:-4] + '.pkl')
        if exists(gt_pkl_file):
            # Read pkl
            with open(gt_pkl_file, 'rb') as f:
                day_gt_t, day_gt_H = pickle.load(f)

        else:
            # File paths
            gt_csv = join(data_path, gt_folder, gt_f)

            # Load gt
            gt = np.loadtxt(gt_csv, delimiter=',')

            # Convert gt to homogeneous rotation/translation matrices
            day_gt_t = gt[:, 0]
            day_gt_H = ssc_to_homo(gt[:, 1:])

            # Save pickle
            with open(gt_pkl_file, 'wb') as f:
                pickle.dump([day_gt_t, day_gt_H], f)

        t2 = time.time()
        print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1))

        gt_t += [day_gt_t]
        gt_H += [day_gt_H]

        if show_day_trajectory:

            cov_csv = join(data_path, cov_folder, cov_files[d])
            cov = np.loadtxt(cov_csv, delimiter=',')
            t_cov = cov[:, 0]
            t_cov_bool = np.logical_and(t_cov > np.min(day_gt_t), t_cov < np.max(day_gt_t))
            t_cov = t_cov[t_cov_bool]

            # Note: Interpolation is not needed, this is done as a convenience
            interp = scipy.interpolate.interp1d(day_gt_t, day_gt_H[:, :3, 3], kind='nearest', axis=0)
            node_poses = interp(t_cov)

            plt.figure()
            plt.scatter(day_gt_H[:, 1, 3], day_gt_H[:, 0, 3], 1, c=-day_gt_H[:, 2, 3], linewidth=0)
            plt.scatter(node_poses[:, 1], node_poses[:, 0], 1, c=-node_poses[:, 2], linewidth=5)
            plt.axis('equal')
            plt.title('Ground Truth Position of Nodes in SLAM Graph')
            plt.xlabel('East (m)')
            plt.ylabel('North (m)')
            plt.colorbar()
            plt.show()

    t2 = time.time()
    print('Done in {:.1f}s\n'.format(t2 - t0))
    # Out files
    out_folder = join(data_path, 'day_ply')
    if not exists(out_folder):
        makedirs(out_folder)

    # Focus on a particular point
    p0 = np.array([-220, -527, 12])
    center_radius = 10.0
    point_radius = 50.0

    # Loop on days
    for d, day in enumerate(days):

        # if day != '2012-02-05':
        #     continue

        day_min_t = gt_t[d][0]
        day_max_t = gt_t[d][-1]

        frames_folder = join(data_path, 'frames_ply', day)
        f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])

        # If we want, load only SLAM nodes
        if only_SLAM_nodes:

            # Load node timestamps
            cov_csv = join(data_path, cov_folder, cov_files[d])
            cov = np.loadtxt(cov_csv, delimiter=',')
            t_cov = cov[:, 0]
            t_cov_bool = np.logical_and(t_cov > day_min_t, t_cov < day_max_t)
            t_cov = t_cov[t_cov_bool]

            # Find closest lidar frames
            t_cov = np.expand_dims(t_cov, 1)
            diffs = np.abs(t_cov - f_times)
            inds = np.argmin(diffs, axis=1)
            f_times = f_times[inds]

        # Only keep frames covered by the gt poses
        f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
        f_times = f_times[f_t_bool]

        # Interpolate gt poses at frame timestamps
        interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
        frame_poses = interp(f_times)

        N = len(f_times)
        world_points = []
        world_frames = []
        world_frames_c = []

        print('Reading', day, ' => ', N, 'files')

        for f_i, f_t in enumerate(f_times):

            t1 = time.time()

            #########
            # GT pose
            #########

            H = frame_poses[f_i].astype(np.float32)
            # s = '\n'
            # for cc in H:
            #     for c in cc:
            #         s += '{:5.2f} '.format(c)
            #     s += '\n'
            # print(s)

            #############
            # Focus check
            #############

            if np.linalg.norm(H[:3, 3] - p0) > center_radius:
                continue

            ###################################
            # Local frame coordinates for debug
            ###################################

            # Create artificial frames
            x = np.linspace(0, 1, 50, dtype=np.float32)
            points = np.hstack((np.vstack((x, x*0, x*0)), np.vstack((x*0, x, x*0)), np.vstack((x*0, x*0, x)))).T
            colors = ((points > 0.1).astype(np.float32) * 255).astype(np.uint8)

            hpoints = np.hstack((points, np.ones_like(points[:, :1])))
            hpoints = np.matmul(hpoints, H.T)
            hpoints[:, 3] *= 0
            world_frames += [hpoints[:, :3]]
            world_frames_c += [colors]

            #######################
            # Load velo point cloud
            #######################

            # Load frame ply file
            f_name = '{:.0f}.ply'.format(f_t)
            data = read_ply(join(frames_folder, f_name))
            points = np.vstack((data['x'], data['y'], data['z'])).T
            # intensity = data['intensity']

            hpoints = np.hstack((points, np.ones_like(points[:, :1])))
            hpoints = np.matmul(hpoints, H.T)

            # Reuse the homogeneous column to store a per-point time feature (saved as 't' below)
            hpoints[:, 3] *= 0
            hpoints[:, 3] += np.sqrt(f_t - f_times[0])

            # focus check
            focus_bool = np.linalg.norm(hpoints[:, :3] - p0, axis=1) < point_radius
            hpoints = hpoints[focus_bool, :]

            world_points += [hpoints]

            t2 = time.time()
            print('File {:s} {:d}/{:d} Done in {:.1f}s'.format(f_name, f_i, N, t2 - t1))

        if len(world_points) < 2:
            continue

        world_points = np.vstack(world_points)

        ###### DEBUG
        world_frames = np.vstack(world_frames)
        world_frames_c = np.vstack(world_frames_c)
        write_ply('testf.ply',
                  [world_frames, world_frames_c],
                  ['x', 'y', 'z', 'red', 'green', 'blue'])
        ###### DEBUG

        print(world_points.shape, world_points.dtype)

        # Subsample merged frames
        # world_points, features = grid_subsampling(world_points[:, :3],
        #                                           features=world_points[:, 3:],
        #                                           sampleDl=0.1)
        features = world_points[:, 3:]
        world_points = world_points[:, :3]

        print(world_points.shape, world_points.dtype)

        write_ply('test' + day + '.ply',
                  [world_points, features],
                  ['x', 'y', 'z', 't'])

        # Generate gt annotations

        # Subsample day ply (for visualization)

        # Save day ply

        # a = 1/0


@ -1,344 +0,0 @@
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Callable script to start a training on NCLT dataset
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 06/03/2020
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Common libs
import signal
import os
import numpy as np
import sys
import torch
# Dataset
from datasets.NCLT import *
from torch.utils.data import DataLoader
from utils.config import Config
from utils.trainer import ModelTrainer
from models.architectures import KPFCNN
# ----------------------------------------------------------------------------------------------------------------------
#
# Config Class
# \******************/
#
class NCLTConfig(Config):
    """
    Override the parameters you want to modify for this dataset
    """

    ####################
    # Dataset parameters
    ####################

    # Dataset name
    dataset = 'NCLT'

    # Number of classes in the dataset (this value is overwritten by the dataset class when initializing the dataset)
    num_classes = None

    # Type of task performed on this dataset (also overwritten)
    dataset_task = ''

    # Number of CPU threads for the input pipeline
    input_threads = 10

    #########################
    # Architecture definition
    #########################

    # Define layers
    architecture = ['simple',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'resnetb',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'nearest_upsample',
                    'unary',
                    'nearest_upsample',
                    'unary',
                    'nearest_upsample',
                    'unary',
                    'nearest_upsample',
                    'unary']
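
    # The list above is the usual KP-FCNN encoder/decoder layout: the four 'resnetb_strided'
    # blocks create five resolution levels, and the decoder mirrors them with four
    # 'nearest_upsample' + 'unary' blocks to recover per-point predictions.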

    ###################
    # KPConv parameters
    ###################

    # Radius of the input sphere
    in_radius = 6.0
    val_radius = 51.0
    n_frames = 1
    max_in_points = 100000
    max_val_points = 200000

    # Number of batches
    batch_num = 8
    val_batch_num = 1

    # Number of kernel points
    num_kernel_points = 15

    # Size of the first subsampling grid in meters
    first_subsampling_dl = 0.06

    # Radius of convolution in "number of grid cells" (2.5 is the standard value)
    conv_radius = 2.5

    # Radius of deformable convolution in "number of grid cells". Larger so that the deformed kernel can spread out
    deform_radius = 6.0

    # Radius of the area of influence of each kernel point in "number of grid cells" (1.0 is the standard value)
    KP_extent = 1.5

    # Behavior of convolutions in ('constant', 'linear', 'gaussian')
    KP_influence = 'linear'

    # Aggregation function of KPConv in ('closest', 'sum')
    aggregation_mode = 'sum'

    # Choice of input features
    first_features_dim = 128
    in_features_dim = 2

    # Can the network learn modulations
    modulated = False

    # Batch normalization parameters
    use_batch_norm = True
    batch_norm_momentum = 0.02

    # Offset loss
    # 'permissive' only constrains offsets inside the deform radius (NOT implemented yet)
    # 'fitting' helps deformed kernels to adapt to the geometry by penalizing distance to input points
    offsets_loss = 'fitting'
    offsets_decay = 0.01

    #####################
    # Training parameters
    #####################

    # Maximal number of epochs
    max_epoch = 800

    # Learning rate management
    learning_rate = 1e-2
    momentum = 0.98
    lr_decays = {i: 0.1 ** (1 / 150) for i in range(1, max_epoch)}
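
    # With this schedule the learning rate is multiplied by 0.1^(1/150) after every epoch,
    # i.e. it is divided by 10 every 150 epochs.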

    grad_clip_norm = 100.0

    # Number of steps per epoch
    epoch_steps = 500

    # Number of validation examples per epoch
    validation_size = 200

    # Number of epochs between each checkpoint
    checkpoint_gap = 50

    # Augmentations
    augment_scale_anisotropic = True
    augment_symmetries = [True, False, False]
    augment_rotation = 'vertical'
    augment_scale_min = 0.8
    augment_scale_max = 1.2
    augment_noise = 0.001
    augment_color = 0.8

    # Choose class weights (used in segmentation loss). Empty list for no weights
    # class proportions for R=10.0 and dl=0.08 (first is unlabeled)
    # 19.1 48.9 0.5 1.1 5.6 3.6 0.7 0.6 0.9 193.2 17.7 127.4 6.7 132.3 68.4 283.8 7.0 78.5 3.3 0.8
    #
    #
    # sqrt(Inverse of proportion * 100)
    # class_w = [1.430, 14.142, 9.535, 4.226, 5.270, 11.952, 12.910, 10.541, 0.719,
    #            2.377, 0.886, 3.863, 0.869, 1.209, 0.594, 3.780, 1.129, 5.505, 11.180]
    # sqrt(Inverse of proportion * 100) capped (0.5 < X < 5)
    # class_w = [1.430, 5.000, 5.000, 4.226, 5.000, 5.000, 5.000, 5.000, 0.719, 2.377,
    #            0.886, 3.863, 0.869, 1.209, 0.594, 3.780, 1.129, 5.000, 5.000]
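
    # Worked example of the formula above: a class with proportion 0.5 gets sqrt(100 / 0.5) = 14.142,
    # while one with proportion 48.9 gets sqrt(100 / 48.9) = 1.430, matching the first values of class_w.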

    # Do we need to save convergence
    saving = True
    saving_path = None
# ----------------------------------------------------------------------------------------------------------------------
#
# Main Call
# \***************/
#
if __name__ == '__main__':

    # test_read_hits()
    # frames_to_ply()
    merge_day_pointclouds()

    # Deliberate early stop: only the data preparation above is run for now
    a = 1/0

    ############################
    # Initialize the environment
    ############################

    # Set which gpu is going to be used
    GPU_ID = '2'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

    ###############
    # Previous chkp
    ###############

    # Choose here if you want to start training from a previous snapshot (None for new training)
    # previous_training_path = 'Log_2020-03-19_19-53-27'
    previous_training_path = ''

    # Choose index of checkpoint to start from. If None, uses the latest chkp
    chkp_idx = None
    if previous_training_path:

        # Find all snapshots in the chosen training folder
        chkp_path = os.path.join('results', previous_training_path, 'checkpoints')
        chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']

        # Find which snapshot to restore
        if chkp_idx is None:
            chosen_chkp = 'current_chkp.tar'
        else:
            chosen_chkp = np.sort(chkps)[chkp_idx]
        chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp)

    else:
        chosen_chkp = None

    ##############
    # Prepare Data
    ##############

    print()
    print('Data Preparation')
    print('****************')

    # Initialize configuration class
    config = NCLTConfig()
    if previous_training_path:
        config.load(os.path.join('results', previous_training_path))
        config.saving_path = None

    # Get path from argument if given
    if len(sys.argv) > 1:
        config.saving_path = sys.argv[1]

    # Initialize datasets
    training_dataset = NCLTDataset(config, set='training',
                                   balance_classes=True)
    test_dataset = NCLTDataset(config, set='validation',
                               balance_classes=False)

    # Initialize samplers
    training_sampler = NCLTSampler(training_dataset)
    test_sampler = NCLTSampler(test_dataset)

    # Initialize the dataloader
    training_loader = DataLoader(training_dataset,
                                 batch_size=1,
                                 sampler=training_sampler,
                                 collate_fn=NCLTCollate,
                                 num_workers=config.input_threads,
                                 pin_memory=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             sampler=test_sampler,
                             collate_fn=NCLTCollate,
                             num_workers=config.input_threads,
                             pin_memory=True)
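
    # Note: batch_size is left at 1 because batches of variable size appear to be assembled by
    # the custom sampler / collate pair (NCLTSampler, NCLTCollate) rather than by the DataLoader
    # itself (an assumption based on how they are passed above, as in the other training scripts).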

    # Calibrate max_in_point value
    training_sampler.calib_max_in(config, training_loader, verbose=True)
    test_sampler.calib_max_in(config, test_loader, verbose=True)

    # Calibrate samplers
    training_sampler.calibration(training_loader, verbose=True)
    test_sampler.calibration(test_loader, verbose=True)

    # debug_timing(training_dataset, training_loader)
    # debug_timing(test_dataset, test_loader)
    # debug_class_w(training_dataset, training_loader)

    print('\nModel Preparation')
    print('*****************')

    # Define network model
    t1 = time.time()
    net = KPFCNN(config, training_dataset.label_values, training_dataset.ignored_labels)

    debug = False
    if debug:
        print('\n*************************************\n')
        print(net)
        print('\n*************************************\n')
        for param in net.parameters():
            if param.requires_grad:
                print(param.shape)
        print('\n*************************************\n')
        print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad))
        print('\n*************************************\n')

    # Define a trainer class
    trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
    print('Done in {:.1f}s\n'.format(time.time() - t1))

    print('\nStart training')
    print('**************')

    # Training
    trainer.train(net, training_loader, test_loader, config)

    print('Forcing exit now')
    os.kill(os.getpid(), signal.SIGINT)


@ -24,9 +24,6 @@
 # Common libs
 import signal
 import os
-import numpy as np
-import sys
-import torch

 # Dataset
 from datasets.S3DIS import *
@ -271,6 +268,7 @@ if __name__ == '__main__':
     training_sampler.calibration(training_loader, verbose=True)
     test_sampler.calibration(test_loader, verbose=True)

+    # Optional debug functions
     # debug_timing(training_dataset, training_loader)
     # debug_timing(test_dataset, test_loader)
     # debug_upsampling(training_dataset, training_loader)


@ -1,205 +0,0 @@
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Callable script to visualize the effective receptive field of a trained model
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 06/03/2020
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Common libs
import signal
import os
import numpy as np
import sys
import torch
# Dataset
from datasets.ModelNet40 import *
from datasets.S3DIS import *
from torch.utils.data import DataLoader
from utils.config import Config
from utils.visualizer import ModelVisualizer
from models.architectures import KPCNN, KPFCNN
# ----------------------------------------------------------------------------------------------------------------------
#
# Main Call
# \***************/
#
def model_choice(chosen_log):

    ###########################
    # Call the test initializer
    ###########################

    # Automatically retrieve the last trained model
    if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']:

        # Dataset name
        test_dataset = '_'.join(chosen_log.split('_')[1:])

        # List all training logs
        logs = np.sort([os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log')])

        # Find the last log of asked dataset
        for log in logs[::-1]:
            log_config = Config()
            log_config.load(log)
            if log_config.dataset.startswith(test_dataset):
                chosen_log = log
                break

        if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']:
            raise ValueError('No log of the dataset "' + test_dataset + '" found')

    # Check if log exists
    if not os.path.exists(chosen_log):
        raise ValueError('The given log does not exist: ' + chosen_log)

    return chosen_log
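
# For instance, model_choice('last_S3DIS') walks the 'results/Log_*' folders from newest to oldest
# and returns the first one whose saved config was trained on S3DIS, while a direct path such as
# 'results/Log_YYYY-MM-DD_HH-MM-SS' is returned unchanged after the existence check.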
# ----------------------------------------------------------------------------------------------------------------------
#
# Main Call
# \***************/
#
if __name__ == '__main__':

    ###############################
    # Choose the model to visualize
    ###############################

    # Here you can choose which model you want to test with the variable chosen_log. Here are the possible values:
    #
    # > 'last_XXX': Automatically retrieve the last trained model on dataset XXX
    # > '(old_)results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model

    # chosen_log = 'results/Log_2020-04-04_10-04-42'  # => ModelNet40
    # chosen_log = 'results/Log_2020-04-04_10-04-42'  # => S3DIS
    chosen_log = 'results/Log_2020-04-22_12-28-37'  # => S3DIS corrected

    # You can also choose the index of the snapshot to load (last by default)
    chkp_idx = -1

    # Optionally you can choose which feature is visualized (index of the deform operation in the network)
    f_idx = -1

    # Deal with 'last_XXX' choices
    chosen_log = model_choice(chosen_log)

    ############################
    # Initialize the environment
    ############################

    # Set which gpu is going to be used
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

    ###############
    # Previous chkp
    ###############

    # Find all checkpoints in the chosen training folder
    chkp_path = os.path.join(chosen_log, 'checkpoints')
    chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']

    # Find which snapshot to restore (with chkp_idx = -1, the last checkpoint in sorted order)
    if chkp_idx is None:
        chosen_chkp = 'current_chkp.tar'
    else:
        chosen_chkp = np.sort(chkps)[chkp_idx]
    chosen_chkp = os.path.join(chosen_log, 'checkpoints', chosen_chkp)

    # Initialize configuration class
    config = Config()
    config.load(chosen_log)

    ##################################
    # Change model parameters for test
    ##################################

    # Change parameters for the test here. For example, you can stop augmenting the input data.
    config.augment_noise = 0.0001
    # config.augment_symmetries = False
    config.batch_num = 1
    config.in_radius = 2.0
    config.input_threads = 0

    ##############
    # Prepare Data
    ##############

    print()
    print('Data Preparation')
    print('****************')

    # Initiate dataset
    if config.dataset.startswith('ModelNet40'):
        test_dataset = ModelNet40Dataset(config, train=False)
        test_sampler = ModelNet40Sampler(test_dataset)
        collate_fn = ModelNet40Collate
    elif config.dataset == 'S3DIS':
        test_dataset = S3DISDataset(config, set='validation', use_potentials=True)
        test_sampler = S3DISSampler(test_dataset)
        collate_fn = S3DISCollate
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Data loader
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             sampler=test_sampler,
                             collate_fn=collate_fn,
                             num_workers=config.input_threads,
                             pin_memory=True)

    # Calibrate samplers
    test_sampler.calibration(test_loader, verbose=True)

    print('\nModel Preparation')
    print('*****************')

    # Define network model
    t1 = time.time()
    if config.dataset_task == 'classification':
        net = KPCNN(config)
    elif config.dataset_task in ['cloud_segmentation', 'slam_segmentation']:
        net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels)
    else:
        raise ValueError('Unsupported dataset_task for deformation visu: ' + config.dataset_task)

    # Define a visualizer class
    visualizer = ModelVisualizer(net, config, chkp_path=chosen_chkp, on_gpu=False)
    print('Done in {:.1f}s\n'.format(time.time() - t1))

    print('\nStart visualization')
    print('*******************')

    # Visualization of the effective receptive field
    visualizer.show_effective_recep_field(net, test_loader, config, f_idx)