🎨 black + ruff

Laurent FAINSIN 2023-05-15 17:18:10 +02:00
parent 9cc74574e4
commit d0cdb8e4ee
24 changed files with 3760 additions and 3099 deletions

View file

@@ -7,22 +7,20 @@ import numpy.distutils.misc_util
 # Adding sources of the project
 # *****************************
-SOURCES = ["../cpp_utils/cloud/cloud.cpp",
-           "neighbors/neighbors.cpp",
-           "wrapper.cpp"]
-
-module = Extension(name="radius_neighbors",
-                   sources=SOURCES,
-                   extra_compile_args=['-std=c++11',
-                                       '-D_GLIBCXX_USE_CXX11_ABI=0'])
-
-setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
+SOURCES = [
+    "../cpp_utils/cloud/cloud.cpp",
+    "neighbors/neighbors.cpp",
+    "wrapper.cpp",
+]
+
+module = Extension(
+    name="radius_neighbors",
+    sources=SOURCES,
+    extra_compile_args=["-std=c++11", "-D_GLIBCXX_USE_CXX11_ABI=0"],
+)
+
+setup(
+    ext_modules=[module],
+    include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs(),
+)

View file

@@ -7,22 +7,20 @@ import numpy.distutils.misc_util
 # Adding sources of the project
 # *****************************
-SOURCES = ["../cpp_utils/cloud/cloud.cpp",
-           "grid_subsampling/grid_subsampling.cpp",
-           "wrapper.cpp"]
-
-module = Extension(name="grid_subsampling",
-                   sources=SOURCES,
-                   extra_compile_args=['-std=c++11',
-                                       '-D_GLIBCXX_USE_CXX11_ABI=0'])
-
-setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
+SOURCES = [
+    "../cpp_utils/cloud/cloud.cpp",
+    "grid_subsampling/grid_subsampling.cpp",
+    "wrapper.cpp",
+]
+
+module = Extension(
+    name="grid_subsampling",
+    sources=SOURCES,
+    extra_compile_args=["-std=c++11", "-D_GLIBCXX_USE_CXX11_ABI=0"],
+)
+
+setup(
+    ext_modules=[module],
+    include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs(),
+)

View file

@@ -27,11 +27,9 @@ import time
 import numpy as np
 import pickle
 import torch
-import math
 
 # OS functions
-from os import listdir
 from os.path import exists, join
 
 # Dataset parent class
@@ -55,53 +53,55 @@ class ModelNet40Dataset(PointCloudDataset):
         """
         This dataset is small enough to be stored in-memory, so load all point clouds here
         """
-        PointCloudDataset.__init__(self, 'ModelNet40')
+        PointCloudDataset.__init__(self, "ModelNet40")
 
         ############
         # Parameters
         ############
 
         # Dict from labels to names
-        self.label_to_names = {0: 'airplane',
-                               1: 'bathtub',
-                               2: 'bed',
-                               3: 'bench',
-                               4: 'bookshelf',
-                               5: 'bottle',
-                               6: 'bowl',
-                               7: 'car',
-                               8: 'chair',
-                               9: 'cone',
-                               10: 'cup',
-                               11: 'curtain',
-                               12: 'desk',
-                               13: 'door',
-                               14: 'dresser',
-                               15: 'flower_pot',
-                               16: 'glass_box',
-                               17: 'guitar',
-                               18: 'keyboard',
-                               19: 'lamp',
-                               20: 'laptop',
-                               21: 'mantel',
-                               22: 'monitor',
-                               23: 'night_stand',
-                               24: 'person',
-                               25: 'piano',
-                               26: 'plant',
-                               27: 'radio',
-                               28: 'range_hood',
-                               29: 'sink',
-                               30: 'sofa',
-                               31: 'stairs',
-                               32: 'stool',
-                               33: 'table',
-                               34: 'tent',
-                               35: 'toilet',
-                               36: 'tv_stand',
-                               37: 'vase',
-                               38: 'wardrobe',
-                               39: 'xbox'}
+        self.label_to_names = {
+            0: "airplane",
+            1: "bathtub",
+            2: "bed",
+            3: "bench",
+            4: "bookshelf",
+            5: "bottle",
+            6: "bowl",
+            7: "car",
+            8: "chair",
+            9: "cone",
+            10: "cup",
+            11: "curtain",
+            12: "desk",
+            13: "door",
+            14: "dresser",
+            15: "flower_pot",
+            16: "glass_box",
+            17: "guitar",
+            18: "keyboard",
+            19: "lamp",
+            20: "laptop",
+            21: "mantel",
+            22: "monitor",
+            23: "night_stand",
+            24: "person",
+            25: "piano",
+            26: "plant",
+            27: "radio",
+            28: "range_hood",
+            29: "sink",
+            30: "sofa",
+            31: "stairs",
+            32: "stool",
+            33: "table",
+            34: "tent",
+            35: "toilet",
+            36: "tv_stand",
+            37: "vase",
+            38: "wardrobe",
+            39: "xbox",
+        }
 
         # Initialize a bunch of variables concerning class labels
         self.init_labels()
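
Note: the dictionary above is only reflowed by black; the bookkeeping built from it is unchanged. As a rough illustration of what init_labels derives from label_to_names (its body is only partly visible in this diff, and the values below are truncated and made up for the example):

    label_to_names = {0: "airplane", 1: "bathtub", 2: "bed"}  # truncated example
    num_classes = len(label_to_names)                         # as in init_labels further down
    label_values = sorted(label_to_names)                     # -> [0, 1, 2]
    name_to_label = {name: label for label, name in label_to_names.items()}
    assert name_to_label["bed"] == 2                          # lookup used by load_subsampled_clouds
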
@@ -110,10 +110,10 @@ class ModelNet40Dataset(PointCloudDataset):
         self.ignored_labels = np.array([])
 
         # Dataset folder
-        self.path = './Data/ModelNet40'
+        self.path = "./Data/ModelNet40"
 
         # Type of task conducted on this dataset
-        self.dataset_task = 'classification'
+        self.dataset_task = "classification"
 
         # Update number of class and data task in configuration
         config.num_classes = self.num_classes
@@ -128,22 +128,31 @@ class ModelNet40Dataset(PointCloudDataset):
         # Number of models and models used per epoch
         if self.train:
             self.num_models = 9843
-            if config.epoch_steps and config.epoch_steps * config.batch_num < self.num_models:
+            if (
+                config.epoch_steps
+                and config.epoch_steps * config.batch_num < self.num_models
+            ):
                 self.epoch_n = config.epoch_steps * config.batch_num
             else:
                 self.epoch_n = self.num_models
         else:
             self.num_models = 2468
-            self.epoch_n = min(self.num_models, config.validation_size * config.batch_num)
+            self.epoch_n = min(
+                self.num_models, config.validation_size * config.batch_num
+            )
 
         #############
         # Load models
         #############
 
         if 0 < self.config.first_subsampling_dl <= 0.01:
-            raise ValueError('subsampling_parameter too low (should be over 1 cm')
+            raise ValueError("subsampling_parameter too low (should be over 1 cm")
 
-        self.input_points, self.input_normals, self.input_labels = self.load_subsampled_clouds(orient_correction)
+        (
+            self.input_points,
+            self.input_normals,
+            self.input_labels,
+        ) = self.load_subsampled_clouds(orient_correction)
 
         return
@@ -171,7 +180,6 @@ class ModelNet40Dataset(PointCloudDataset):
         R_list = []
 
         for p_i in idx_list:
-
             # Get points and labels
             points = self.input_points[p_i].astype(np.float32)
             normals = self.input_normals[p_i].astype(np.float32)
@@ -192,7 +200,7 @@ class ModelNet40Dataset(PointCloudDataset):
         # Concatenate batch
         ###################
 
-        #show_ModelNet_examples(tp_list, cloud_normals=tn_list)
+        # show_ModelNet_examples(tp_list, cloud_normals=tn_list)
 
         stacked_points = np.concatenate(tp_list, axis=0)
         stacked_normals = np.concatenate(tn_list, axis=0)
@@ -209,7 +217,9 @@ class ModelNet40Dataset(PointCloudDataset):
         elif self.config.in_features_dim == 4:
             stacked_features = np.hstack((stacked_features, stacked_normals))
         else:
-            raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)')
+            raise ValueError(
+                "Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)"
+            )
 
         #######################
         # Create network inputs
@@ -219,10 +229,9 @@ class ModelNet40Dataset(PointCloudDataset):
         #
 
         # Get the whole input list
-        input_list = self.classification_inputs(stacked_points,
-                                                stacked_features,
-                                                labels,
-                                                stack_lengths)
+        input_list = self.classification_inputs(
+            stacked_points, stacked_features, labels, stack_lengths
+        )
 
         # Add scale and rotation for testing
         input_list += [scales, rots, model_inds]
@@ -230,31 +239,38 @@ class ModelNet40Dataset(PointCloudDataset):
         return input_list
 
     def load_subsampled_clouds(self, orient_correction):
-
         # Restart timer
         t0 = time.time()
 
         # Load wanted points if possible
         if self.train:
-            split ='training'
+            split = "training"
         else:
-            split = 'test'
+            split = "test"
 
-        print('\nLoading {:s} points subsampled at {:.3f}'.format(split, self.config.first_subsampling_dl))
-        filename = join(self.path, '{:s}_{:.3f}_record.pkl'.format(split, self.config.first_subsampling_dl))
+        print(
+            "\nLoading {:s} points subsampled at {:.3f}".format(
+                split, self.config.first_subsampling_dl
+            )
+        )
+        filename = join(
+            self.path,
+            "{:s}_{:.3f}_record.pkl".format(split, self.config.first_subsampling_dl),
+        )
 
         if exists(filename):
-            with open(filename, 'rb') as file:
+            with open(filename, "rb") as file:
                 input_points, input_normals, input_labels = pickle.load(file)
 
         # Else compute them from original points
         else:
             # Collect training file names
             if self.train:
-                names = np.loadtxt(join(self.path, 'modelnet40_train.txt'), dtype=np.str)
+                names = np.loadtxt(
+                    join(self.path, "modelnet40_train.txt"), dtype=np.str
+                )
             else:
-                names = np.loadtxt(join(self.path, 'modelnet40_test.txt'), dtype=np.str)
+                names = np.loadtxt(join(self.path, "modelnet40_test.txt"), dtype=np.str)
 
             # Initialize containers
             input_points = []
@ -263,49 +279,54 @@ class ModelNet40Dataset(PointCloudDataset):
# Advanced display # Advanced display
N = len(names) N = len(names)
progress_n = 30 progress_n = 30
fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%' fmt_str = "[{:<" + str(progress_n) + "}] {:5.1f}%"
# Collect point clouds # Collect point clouds
for i, cloud_name in enumerate(names): for i, cloud_name in enumerate(names):
# Read points # Read points
class_folder = '_'.join(cloud_name.split('_')[:-1]) class_folder = "_".join(cloud_name.split("_")[:-1])
txt_file = join(self.path, class_folder, cloud_name) + '.txt' txt_file = join(self.path, class_folder, cloud_name) + ".txt"
data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32) data = np.loadtxt(txt_file, delimiter=",", dtype=np.float32)
# Subsample them # Subsample them
if self.config.first_subsampling_dl > 0: if self.config.first_subsampling_dl > 0:
points, normals = grid_subsampling(data[:, :3], points, normals = grid_subsampling(
features=data[:, 3:], data[:, :3],
sampleDl=self.config.first_subsampling_dl) features=data[:, 3:],
sampleDl=self.config.first_subsampling_dl,
)
else: else:
points = data[:, :3] points = data[:, :3]
normals = data[:, 3:] normals = data[:, 3:]
print('', end='\r') print("", end="\r")
print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True) print(
fmt_str.format("#" * ((i * progress_n) // N), 100 * i / N),
end="",
flush=True,
)
# Add to list # Add to list
input_points += [points] input_points += [points]
input_normals += [normals] input_normals += [normals]
print('', end='\r') print("", end="\r")
print(fmt_str.format('#' * progress_n, 100), end='', flush=True) print(fmt_str.format("#" * progress_n, 100), end="", flush=True)
print() print()
# Get labels # Get labels
label_names = ['_'.join(name.split('_')[:-1]) for name in names] label_names = ["_".join(name.split("_")[:-1]) for name in names]
input_labels = np.array([self.name_to_label[name] for name in label_names]) input_labels = np.array([self.name_to_label[name] for name in label_names])
# Save for later use # Save for later use
with open(filename, 'wb') as file: with open(filename, "wb") as file:
pickle.dump((input_points, pickle.dump((input_points, input_normals, input_labels), file)
input_normals,
input_labels), file)
lengths = [p.shape[0] for p in input_points] lengths = [p.shape[0] for p in input_points]
sizes = [l * 4 * 6 for l in lengths] sizes = [l * 4 * 6 for l in lengths]
print('{:.1f} MB loaded in {:.1f}s'.format(np.sum(sizes) * 1e-6, time.time() - t0)) print(
"{:.1f} MB loaded in {:.1f}s".format(np.sum(sizes) * 1e-6, time.time() - t0)
)
if orient_correction: if orient_correction:
input_points = [pp[:, [0, 2, 1]] for pp in input_points] input_points = [pp[:, [0, 2, 1]] for pp in input_points]
@ -313,6 +334,7 @@ class ModelNet40Dataset(PointCloudDataset):
return input_points, input_normals, input_labels return input_points, input_normals, input_labels
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
# #
# Utility classes definition # Utility classes definition
@ -322,7 +344,9 @@ class ModelNet40Dataset(PointCloudDataset):
class ModelNet40Sampler(Sampler): class ModelNet40Sampler(Sampler):
"""Sampler for ModelNet40""" """Sampler for ModelNet40"""
def __init__(self, dataset: ModelNet40Dataset, use_potential=True, balance_labels=False): def __init__(
self, dataset: ModelNet40Dataset, use_potential=True, balance_labels=False
):
Sampler.__init__(self, dataset) Sampler.__init__(self, dataset)
# Does the sampler use potential for regular sampling # Does the sampler use potential for regular sampling
@ -356,18 +380,18 @@ class ModelNet40Sampler(Sampler):
if self.use_potential: if self.use_potential:
if self.balance_labels: if self.balance_labels:
gen_indices = [] gen_indices = []
pick_n = self.dataset.epoch_n // self.dataset.num_classes + 1 pick_n = self.dataset.epoch_n // self.dataset.num_classes + 1
for i, l in enumerate(self.dataset.label_values): for i, l in enumerate(self.dataset.label_values):
# Get the potentials of the objects of this class # Get the potentials of the objects of this class
label_inds = np.where(np.equal(self.dataset.input_labels, l))[0] label_inds = np.where(np.equal(self.dataset.input_labels, l))[0]
class_potentials = self.potentials[label_inds] class_potentials = self.potentials[label_inds]
# Get the indices to generate thanks to potentials # Get the indices to generate thanks to potentials
if pick_n < class_potentials.shape[0]: if pick_n < class_potentials.shape[0]:
pick_indices = np.argpartition(class_potentials, pick_n)[:pick_n] pick_indices = np.argpartition(class_potentials, pick_n)[
:pick_n
]
else: else:
pick_indices = np.random.permutation(class_potentials.shape[0]) pick_indices = np.random.permutation(class_potentials.shape[0])
class_indices = label_inds[pick_indices] class_indices = label_inds[pick_indices]
@ -377,17 +401,20 @@ class ModelNet40Sampler(Sampler):
gen_indices = np.random.permutation(np.hstack(gen_indices)) gen_indices = np.random.permutation(np.hstack(gen_indices))
else: else:
# Get indices with the minimum potential # Get indices with the minimum potential
if self.dataset.epoch_n < self.potentials.shape[0]: if self.dataset.epoch_n < self.potentials.shape[0]:
gen_indices = np.argpartition(self.potentials, self.dataset.epoch_n)[:self.dataset.epoch_n] gen_indices = np.argpartition(
self.potentials, self.dataset.epoch_n
)[: self.dataset.epoch_n]
else: else:
gen_indices = np.random.permutation(self.potentials.shape[0]) gen_indices = np.random.permutation(self.potentials.shape[0])
gen_indices = np.random.permutation(gen_indices) gen_indices = np.random.permutation(gen_indices)
# Update potentials (Change the order for the next epoch) # Update potentials (Change the order for the next epoch)
self.potentials[gen_indices] = np.ceil(self.potentials[gen_indices]) self.potentials[gen_indices] = np.ceil(self.potentials[gen_indices])
self.potentials[gen_indices] += np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1 self.potentials[gen_indices] += (
np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1
)
else: else:
if self.balance_labels: if self.balance_labels:
@ -399,7 +426,9 @@ class ModelNet40Sampler(Sampler):
gen_indices += [rand_inds] gen_indices += [rand_inds]
gen_indices = np.random.permutation(np.hstack(gen_indices)) gen_indices = np.random.permutation(np.hstack(gen_indices))
else: else:
gen_indices = np.random.permutation(self.dataset.num_models)[:self.dataset.epoch_n] gen_indices = np.random.permutation(self.dataset.num_models)[
: self.dataset.epoch_n
]
################ ################
# Generator loop # Generator loop
@ -411,7 +440,6 @@ class ModelNet40Sampler(Sampler):
# Generator loop # Generator loop
for p_i in gen_indices: for p_i in gen_indices:
# Size of picked cloud # Size of picked cloud
n = self.dataset.input_points[p_i].shape[0] n = self.dataset.input_points[p_i].shape[0]
@ -450,7 +478,7 @@ class ModelNet40Sampler(Sampler):
# Previously saved calibration # Previously saved calibration
############################## ##############################
print('\nStarting Calibration (use verbose=True for more details)') print("\nStarting Calibration (use verbose=True for more details)")
t0 = time.time() t0 = time.time()
redo = False redo = False
@ -459,39 +487,40 @@ class ModelNet40Sampler(Sampler):
# *********** # ***********
# Load batch_limit dictionary # Load batch_limit dictionary
batch_lim_file = join(self.dataset.path, 'batch_limits.pkl') batch_lim_file = join(self.dataset.path, "batch_limits.pkl")
if exists(batch_lim_file): if exists(batch_lim_file):
with open(batch_lim_file, 'rb') as file: with open(batch_lim_file, "rb") as file:
batch_lim_dict = pickle.load(file) batch_lim_dict = pickle.load(file)
else: else:
batch_lim_dict = {} batch_lim_dict = {}
# Check if the batch limit associated with current parameters exists # Check if the batch limit associated with current parameters exists
key = '{:.3f}_{:d}'.format(self.dataset.config.first_subsampling_dl, key = "{:.3f}_{:d}".format(
self.dataset.config.batch_num) self.dataset.config.first_subsampling_dl, self.dataset.config.batch_num
)
if key in batch_lim_dict: if key in batch_lim_dict:
self.batch_limit = batch_lim_dict[key] self.batch_limit = batch_lim_dict[key]
else: else:
redo = True redo = True
if verbose: if verbose:
print('\nPrevious calibration found:') print("\nPrevious calibration found:")
print('Check batch limit dictionary') print("Check batch limit dictionary")
if key in batch_lim_dict: if key in batch_lim_dict:
color = bcolors.OKGREEN color = bcolors.OKGREEN
v = str(int(batch_lim_dict[key])) v = str(int(batch_lim_dict[key]))
else: else:
color = bcolors.FAIL color = bcolors.FAIL
v = '?' v = "?"
print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC))
# Neighbors limit # Neighbors limit
# *************** # ***************
# Load neighb_limits dictionary # Load neighb_limits dictionary
neighb_lim_file = join(self.dataset.path, 'neighbors_limits.pkl') neighb_lim_file = join(self.dataset.path, "neighbors_limits.pkl")
if exists(neighb_lim_file): if exists(neighb_lim_file):
with open(neighb_lim_file, 'rb') as file: with open(neighb_lim_file, "rb") as file:
neighb_lim_dict = pickle.load(file) neighb_lim_dict = pickle.load(file)
else: else:
neighb_lim_dict = {} neighb_lim_dict = {}
@ -499,14 +528,13 @@ class ModelNet40Sampler(Sampler):
# Check if the limit associated with current parameters exists (for each layer) # Check if the limit associated with current parameters exists (for each layer)
neighb_limits = [] neighb_limits = []
for layer_ind in range(self.dataset.config.num_layers): for layer_ind in range(self.dataset.config.num_layers):
dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) dl = self.dataset.config.first_subsampling_dl * (2**layer_ind)
if self.dataset.config.deform_layers[layer_ind]: if self.dataset.config.deform_layers[layer_ind]:
r = dl * self.dataset.config.deform_radius r = dl * self.dataset.config.deform_radius
else: else:
r = dl * self.dataset.config.conv_radius r = dl * self.dataset.config.conv_radius
key = '{:.3f}_{:.3f}'.format(dl, r) key = "{:.3f}_{:.3f}".format(dl, r)
if key in neighb_lim_dict: if key in neighb_lim_dict:
neighb_limits += [neighb_lim_dict[key]] neighb_limits += [neighb_lim_dict[key]]
@ -516,34 +544,37 @@ class ModelNet40Sampler(Sampler):
redo = True redo = True
if verbose: if verbose:
print('Check neighbors limit dictionary') print("Check neighbors limit dictionary")
for layer_ind in range(self.dataset.config.num_layers): for layer_ind in range(self.dataset.config.num_layers):
dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) dl = self.dataset.config.first_subsampling_dl * (2**layer_ind)
if self.dataset.config.deform_layers[layer_ind]: if self.dataset.config.deform_layers[layer_ind]:
r = dl * self.dataset.config.deform_radius r = dl * self.dataset.config.deform_radius
else: else:
r = dl * self.dataset.config.conv_radius r = dl * self.dataset.config.conv_radius
key = '{:.3f}_{:.3f}'.format(dl, r) key = "{:.3f}_{:.3f}".format(dl, r)
if key in neighb_lim_dict: if key in neighb_lim_dict:
color = bcolors.OKGREEN color = bcolors.OKGREEN
v = str(neighb_lim_dict[key]) v = str(neighb_lim_dict[key])
else: else:
color = bcolors.FAIL color = bcolors.FAIL
v = '?' v = "?"
print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC))
if redo: if redo:
############################ ############################
# Neighbors calib parameters # Neighbors calib parameters
############################ ############################
# From config parameter, compute higher bound of neighbors number in a neighborhood # From config parameter, compute higher bound of neighbors number in a neighborhood
hist_n = int(np.ceil(4 / 3 * np.pi * (self.dataset.config.conv_radius + 1) ** 3)) hist_n = int(
np.ceil(4 / 3 * np.pi * (self.dataset.config.conv_radius + 1) ** 3)
)
# Histogram of neighborhood sizes # Histogram of neighborhood sizes
neighb_hists = np.zeros((self.dataset.config.num_layers, hist_n), dtype=np.int32) neighb_hists = np.zeros(
(self.dataset.config.num_layers, hist_n), dtype=np.int32
)
######################## ########################
# Batch calib parameters # Batch calib parameters
@ -573,9 +604,11 @@ class ModelNet40Sampler(Sampler):
for epoch in range(10): for epoch in range(10):
for batch_i, batch in enumerate(dataloader): for batch_i, batch in enumerate(dataloader):
# Update neighborhood histogram # Update neighborhood histogram
counts = [np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) for neighb_mat in batch.neighbors] counts = [
np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1)
for neighb_mat in batch.neighbors
]
hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts] hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts]
neighb_hists += np.vstack(hists) neighb_hists += np.vstack(hists)
@ -612,69 +645,68 @@ class ModelNet40Sampler(Sampler):
# Console display (only one per second) # Console display (only one per second)
if verbose and (t - last_display) > 1.0: if verbose and (t - last_display) > 1.0:
last_display = t last_display = t
message = 'Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}' message = "Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}"
print(message.format(i, print(message.format(i, estim_b, int(self.batch_limit)))
estim_b,
int(self.batch_limit)))
if breaking: if breaking:
break break
# Use collected neighbor histogram to get neighbors limit # Use collected neighbor histogram to get neighbors limit
cumsum = np.cumsum(neighb_hists.T, axis=0) cumsum = np.cumsum(neighb_hists.T, axis=0)
percentiles = np.sum(cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0) percentiles = np.sum(
cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0
)
self.dataset.neighborhood_limits = percentiles self.dataset.neighborhood_limits = percentiles
if verbose: if verbose:
# Crop histogram # Crop histogram
while np.sum(neighb_hists[:, -1]) == 0: while np.sum(neighb_hists[:, -1]) == 0:
neighb_hists = neighb_hists[:, :-1] neighb_hists = neighb_hists[:, :-1]
hist_n = neighb_hists.shape[1] hist_n = neighb_hists.shape[1]
print('\n**************************************************\n') print("\n**************************************************\n")
line0 = 'neighbors_num ' line0 = "neighbors_num "
for layer in range(neighb_hists.shape[0]): for layer in range(neighb_hists.shape[0]):
line0 += '| layer {:2d} '.format(layer) line0 += "| layer {:2d} ".format(layer)
print(line0) print(line0)
for neighb_size in range(hist_n): for neighb_size in range(hist_n):
line0 = ' {:4d} '.format(neighb_size) line0 = " {:4d} ".format(neighb_size)
for layer in range(neighb_hists.shape[0]): for layer in range(neighb_hists.shape[0]):
if neighb_size > percentiles[layer]: if neighb_size > percentiles[layer]:
color = bcolors.FAIL color = bcolors.FAIL
else: else:
color = bcolors.OKGREEN color = bcolors.OKGREEN
line0 += '|{:}{:10d}{:} '.format(color, line0 += "|{:}{:10d}{:} ".format(
neighb_hists[layer, neighb_size], color, neighb_hists[layer, neighb_size], bcolors.ENDC
bcolors.ENDC) )
print(line0) print(line0)
print('\n**************************************************\n') print("\n**************************************************\n")
print('\nchosen neighbors limits: ', percentiles) print("\nchosen neighbors limits: ", percentiles)
print() print()
# Save batch_limit dictionary # Save batch_limit dictionary
key = '{:.3f}_{:d}'.format(self.dataset.config.first_subsampling_dl, key = "{:.3f}_{:d}".format(
self.dataset.config.batch_num) self.dataset.config.first_subsampling_dl, self.dataset.config.batch_num
)
batch_lim_dict[key] = self.batch_limit batch_lim_dict[key] = self.batch_limit
with open(batch_lim_file, 'wb') as file: with open(batch_lim_file, "wb") as file:
pickle.dump(batch_lim_dict, file) pickle.dump(batch_lim_dict, file)
# Save neighb_limit dictionary # Save neighb_limit dictionary
for layer_ind in range(self.dataset.config.num_layers): for layer_ind in range(self.dataset.config.num_layers):
dl = self.dataset.config.first_subsampling_dl * (2 ** layer_ind) dl = self.dataset.config.first_subsampling_dl * (2**layer_ind)
if self.dataset.config.deform_layers[layer_ind]: if self.dataset.config.deform_layers[layer_ind]:
r = dl * self.dataset.config.deform_radius r = dl * self.dataset.config.deform_radius
else: else:
r = dl * self.dataset.config.conv_radius r = dl * self.dataset.config.conv_radius
key = '{:.3f}_{:.3f}'.format(dl, r) key = "{:.3f}_{:.3f}".format(dl, r)
neighb_lim_dict[key] = self.dataset.neighborhood_limits[layer_ind] neighb_lim_dict[key] = self.dataset.neighborhood_limits[layer_ind]
with open(neighb_lim_file, 'wb') as file: with open(neighb_lim_file, "wb") as file:
pickle.dump(neighb_lim_dict, file) pickle.dump(neighb_lim_dict, file)
print("Calibration done in {:.1f}s\n".format(time.time() - t0))
print('Calibration done in {:.1f}s\n'.format(time.time() - t0))
return return
@ -682,7 +714,6 @@ class ModelNet40CustomBatch:
"""Custom batch definition with memory pinning for ModelNet40""" """Custom batch definition with memory pinning for ModelNet40"""
def __init__(self, input_list): def __init__(self, input_list):
# Get rid of batch dimension # Get rid of batch dimension
input_list = input_list[0] input_list = input_list[0]
@ -691,13 +722,21 @@ class ModelNet40CustomBatch:
# Extract input tensors from the list of numpy array # Extract input tensors from the list of numpy array
ind = 0 ind = 0
self.points = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] self.points = [
torch.from_numpy(nparray) for nparray in input_list[ind : ind + L]
]
ind += L ind += L
self.neighbors = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] self.neighbors = [
torch.from_numpy(nparray) for nparray in input_list[ind : ind + L]
]
ind += L ind += L
self.pools = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] self.pools = [
torch.from_numpy(nparray) for nparray in input_list[ind : ind + L]
]
ind += L ind += L
self.lengths = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] self.lengths = [
torch.from_numpy(nparray) for nparray in input_list[ind : ind + L]
]
ind += L ind += L
self.features = torch.from_numpy(input_list[ind]) self.features = torch.from_numpy(input_list[ind])
ind += 1 ind += 1
@ -729,7 +768,6 @@ class ModelNet40CustomBatch:
return self return self
def to(self, device): def to(self, device):
self.points = [in_tensor.to(device) for in_tensor in self.points] self.points = [in_tensor.to(device) for in_tensor in self.points]
self.neighbors = [in_tensor.to(device) for in_tensor in self.neighbors] self.neighbors = [in_tensor.to(device) for in_tensor in self.neighbors]
self.pools = [in_tensor.to(device) for in_tensor in self.pools] self.pools = [in_tensor.to(device) for in_tensor in self.pools]
@ -744,15 +782,15 @@ class ModelNet40CustomBatch:
def unstack_points(self, layer=None): def unstack_points(self, layer=None):
"""Unstack the points""" """Unstack the points"""
return self.unstack_elements('points', layer) return self.unstack_elements("points", layer)
def unstack_neighbors(self, layer=None): def unstack_neighbors(self, layer=None):
"""Unstack the neighbors indices""" """Unstack the neighbors indices"""
return self.unstack_elements('neighbors', layer) return self.unstack_elements("neighbors", layer)
def unstack_pools(self, layer=None): def unstack_pools(self, layer=None):
"""Unstack the pooling indices""" """Unstack the pooling indices"""
return self.unstack_elements('pools', layer) return self.unstack_elements("pools", layer)
def unstack_elements(self, element_name, layer=None, to_numpy=True): def unstack_elements(self, element_name, layer=None, to_numpy=True):
""" """
@ -760,34 +798,31 @@ class ModelNet40CustomBatch:
layers layers
""" """
if element_name == 'points': if element_name == "points":
elements = self.points elements = self.points
elif element_name == 'neighbors': elif element_name == "neighbors":
elements = self.neighbors elements = self.neighbors
elif element_name == 'pools': elif element_name == "pools":
elements = self.pools[:-1] elements = self.pools[:-1]
else: else:
raise ValueError('Unknown element name: {:s}'.format(element_name)) raise ValueError("Unknown element name: {:s}".format(element_name))
all_p_list = [] all_p_list = []
for layer_i, layer_elems in enumerate(elements): for layer_i, layer_elems in enumerate(elements):
if layer is None or layer == layer_i: if layer is None or layer == layer_i:
i0 = 0 i0 = 0
p_list = [] p_list = []
if element_name == 'pools': if element_name == "pools":
lengths = self.lengths[layer_i+1] lengths = self.lengths[layer_i + 1]
else: else:
lengths = self.lengths[layer_i] lengths = self.lengths[layer_i]
for b_i, length in enumerate(lengths): for b_i, length in enumerate(lengths):
elem = layer_elems[i0 : i0 + length]
elem = layer_elems[i0:i0 + length] if element_name == "neighbors":
if element_name == 'neighbors':
elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= self.points[layer_i].shape[0]] = -1
elem[elem >= 0] -= i0 elem[elem >= 0] -= i0
elif element_name == 'pools': elif element_name == "pools":
elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= self.points[layer_i].shape[0]] = -1
elem[elem >= 0] -= torch.sum(self.lengths[layer_i][:b_i]) elem[elem >= 0] -= torch.sum(self.lengths[layer_i][:b_i])
i0 += length i0 += length
@ -819,16 +854,15 @@ def debug_sampling(dataset, sampler, loader):
"""Shows which labels are sampled according to strategy chosen""" """Shows which labels are sampled according to strategy chosen"""
label_sum = np.zeros((dataset.num_classes), dtype=np.int32) label_sum = np.zeros((dataset.num_classes), dtype=np.int32)
for epoch in range(10): for epoch in range(10):
for batch_i, (points, normals, labels, indices, in_sizes) in enumerate(loader): for batch_i, (points, normals, labels, indices, in_sizes) in enumerate(loader):
# print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes)
label_sum += np.bincount(labels.numpy(), minlength=dataset.num_classes) label_sum += np.bincount(labels.numpy(), minlength=dataset.num_classes)
print(label_sum) print(label_sum)
#print(sampler.potentials[:6]) # print(sampler.potentials[:6])
print('******************') print("******************")
print('*******************************************') print("*******************************************")
_, counts = np.unique(dataset.input_labels, return_counts=True) _, counts = np.unique(dataset.input_labels, return_counts=True)
print(counts) print(counts)
@ -843,7 +877,6 @@ def debug_timing(dataset, sampler, loader):
estim_b = dataset.config.batch_num estim_b = dataset.config.batch_num
for epoch in range(10): for epoch in range(10):
for batch_i, batch in enumerate(loader): for batch_i, batch in enumerate(loader):
# print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes)
@ -864,56 +897,49 @@ def debug_timing(dataset, sampler, loader):
# Console display (only one per second) # Console display (only one per second)
if (t[-1] - last_display) > -1.0: if (t[-1] - last_display) > -1.0:
last_display = t[-1] last_display = t[-1]
message = 'Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f}' message = "Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f}"
print(message.format(batch_i, print(
1000 * mean_dt[0], message.format(
1000 * mean_dt[1], batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1], estim_b
estim_b)) )
)
print('************* Epoch ended *************') print("************* Epoch ended *************")
_, counts = np.unique(dataset.input_labels, return_counts=True) _, counts = np.unique(dataset.input_labels, return_counts=True)
print(counts) print(counts)
def debug_show_clouds(dataset, sampler, loader): def debug_show_clouds(dataset, sampler, loader):
for epoch in range(10): for epoch in range(10):
clouds = []
cloud_normals = []
cloud_labels = []
L = dataset.config.num_layers L = dataset.config.num_layers
for batch_i, batch in enumerate(loader): for batch_i, batch in enumerate(loader):
# Print characteristics of input tensors # Print characteristics of input tensors
print('\nPoints tensors') print("\nPoints tensors")
for i in range(L): for i in range(L):
print(batch.points[i].dtype, batch.points[i].shape) print(batch.points[i].dtype, batch.points[i].shape)
print('\nNeigbors tensors') print("\nNeigbors tensors")
for i in range(L): for i in range(L):
print(batch.neighbors[i].dtype, batch.neighbors[i].shape) print(batch.neighbors[i].dtype, batch.neighbors[i].shape)
print('\nPools tensors') print("\nPools tensors")
for i in range(L): for i in range(L):
print(batch.pools[i].dtype, batch.pools[i].shape) print(batch.pools[i].dtype, batch.pools[i].shape)
print('\nStack lengths') print("\nStack lengths")
for i in range(L): for i in range(L):
print(batch.lengths[i].dtype, batch.lengths[i].shape) print(batch.lengths[i].dtype, batch.lengths[i].shape)
print('\nFeatures') print("\nFeatures")
print(batch.features.dtype, batch.features.shape) print(batch.features.dtype, batch.features.shape)
print('\nLabels') print("\nLabels")
print(batch.labels.dtype, batch.labels.shape) print(batch.labels.dtype, batch.labels.shape)
print('\nAugment Scales') print("\nAugment Scales")
print(batch.scales.dtype, batch.scales.shape) print(batch.scales.dtype, batch.scales.shape)
print('\nAugment Rotations') print("\nAugment Rotations")
print(batch.rots.dtype, batch.rots.shape) print(batch.rots.dtype, batch.rots.shape)
print('\nModel indices') print("\nModel indices")
print(batch.model_inds.dtype, batch.model_inds.shape) print(batch.model_inds.dtype, batch.model_inds.shape)
print('\nAre input tensors pinned') print("\nAre input tensors pinned")
print(batch.neighbors[0].is_pinned()) print(batch.neighbors[0].is_pinned())
print(batch.neighbors[-1].is_pinned()) print(batch.neighbors[-1].is_pinned())
print(batch.points[0].is_pinned()) print(batch.points[0].is_pinned())
@ -925,7 +951,7 @@ def debug_show_clouds(dataset, sampler, loader):
show_input_batch(batch) show_input_batch(batch)
print('*******************************************') print("*******************************************")
_, counts = np.unique(dataset.input_labels, return_counts=True) _, counts = np.unique(dataset.input_labels, return_counts=True)
print(counts) print(counts)
@ -939,7 +965,6 @@ def debug_batch_and_neighbors_calib(dataset, sampler, loader):
mean_dt = np.zeros(2) mean_dt = np.zeros(2)
for epoch in range(10): for epoch in range(10):
for batch_i, input_list in enumerate(loader): for batch_i, input_list in enumerate(loader):
# print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes)
@ -957,12 +982,10 @@ def debug_batch_and_neighbors_calib(dataset, sampler, loader):
# Console display (only one per second) # Console display (only one per second)
if (t[-1] - last_display) > 1.0: if (t[-1] - last_display) > 1.0:
last_display = t[-1] last_display = t[-1]
message = 'Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} ' message = "Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} "
print(message.format(batch_i, print(message.format(batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1]))
1000 * mean_dt[0],
1000 * mean_dt[1]))
print('************* Epoch ended *************') print("************* Epoch ended *************")
_, counts = np.unique(dataset.input_labels, return_counts=True) _, counts = np.unique(dataset.input_labels, return_counts=True)
print(counts) print(counts)
@ -976,7 +999,6 @@ class ModelNet40WorkerInitDebug:
return return
def __call__(self, worker_id): def __call__(self, worker_id):
# Print workers info # Print workers info
worker_info = get_worker_info() worker_info = get_worker_info()
print(worker_info) print(worker_info)
@ -985,11 +1007,10 @@ class ModelNet40WorkerInitDebug:
dataset = worker_info.dataset # the dataset copy in this worker process dataset = worker_info.dataset # the dataset copy in this worker process
# In windows, each worker has its own copy of the dataset. In Linux, this is shared in memory # In windows, each worker has its own copy of the dataset. In Linux, this is shared in memory
print(dataset.input_labels.__array_interface__['data']) print(dataset.input_labels.__array_interface__["data"])
print(worker_info.dataset.input_labels.__array_interface__['data']) print(worker_info.dataset.input_labels.__array_interface__["data"])
print(self.dataset.input_labels.__array_interface__['data']) print(self.dataset.input_labels.__array_interface__["data"])
# configure the dataset to only process the split workload # configure the dataset to only process the split workload
return return

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -21,12 +21,8 @@
 #
 
 # Common libs
-import time
-import os
 import numpy as np
-import sys
-import torch
-from torch.utils.data import DataLoader, Dataset
+from torch.utils.data import Dataset
 
 from utils.config import Config
 from utils.mayavi_visu import *
 from kernels.kernel_points import create_3D_rotations
@@ -41,6 +37,7 @@ import cpp_wrappers.cpp_neighbors.radius_neighbors as cpp_neighbors
 # \***********************/
 #
 
+
 def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
     """
     CPP wrapper for a grid subsampling (method = barycenter for points and features)
@@ -53,29 +50,35 @@ def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0
     """
 
     if (features is None) and (labels is None):
-        return cpp_subsampling.subsample(points,
-                                         sampleDl=sampleDl,
-                                         verbose=verbose)
-    elif (labels is None):
-        return cpp_subsampling.subsample(points,
-                                         features=features,
-                                         sampleDl=sampleDl,
-                                         verbose=verbose)
-    elif (features is None):
-        return cpp_subsampling.subsample(points,
-                                         classes=labels,
-                                         sampleDl=sampleDl,
-                                         verbose=verbose)
+        return cpp_subsampling.subsample(points, sampleDl=sampleDl, verbose=verbose)
+    elif labels is None:
+        return cpp_subsampling.subsample(
+            points, features=features, sampleDl=sampleDl, verbose=verbose
+        )
+    elif features is None:
+        return cpp_subsampling.subsample(
+            points, classes=labels, sampleDl=sampleDl, verbose=verbose
+        )
     else:
-        return cpp_subsampling.subsample(points,
-                                         features=features,
-                                         classes=labels,
-                                         sampleDl=sampleDl,
-                                         verbose=verbose)
+        return cpp_subsampling.subsample(
+            points,
+            features=features,
+            classes=labels,
+            sampleDl=sampleDl,
+            verbose=verbose,
+        )
 
 
-def batch_grid_subsampling(points, batches_len, features=None, labels=None,
-                           sampleDl=0.1, max_p=0, verbose=0, random_grid_orient=True):
+def batch_grid_subsampling(
+    points,
+    batches_len,
+    features=None,
+    labels=None,
+    sampleDl=0.1,
+    max_p=0,
+    verbose=0,
+    random_grid_orient=True,
+):
     """
     CPP wrapper for a grid subsampling (method = barycenter for points and features)
     :param points: (N, 3) matrix of input points
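
Note: the branches above only decide which optional arrays get forwarded to cpp_subsampling.subsample. A minimal usage sketch, mirroring the call made in the ModelNet40 loader earlier in this diff (the random arrays and the 2 cm cell size are illustrative, and grid_subsampling is assumed to be in scope from this module):

    import numpy as np

    points = np.random.rand(10000, 3).astype(np.float32)   # synthetic cloud
    normals = np.random.rand(10000, 3).astype(np.float32)  # passed as features

    # One barycenter per occupied 2 cm voxel; with features given and labels omitted,
    # the wrapper returns the subsampled points and their averaged features.
    sub_points, sub_normals = grid_subsampling(points, features=normals, sampleDl=0.02)
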
@ -89,7 +92,6 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None,
R = None R = None
B = len(batches_len) B = len(batches_len)
if random_grid_orient: if random_grid_orient:
######################################################## ########################################################
# Create a random rotation matrix for each batch element # Create a random rotation matrix for each batch element
######################################################## ########################################################
@ -99,7 +101,9 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None,
phi = (np.random.rand(B) - 0.5) * np.pi phi = (np.random.rand(B) - 0.5) * np.pi
# Create the first vector in carthesian coordinates # Create the first vector in carthesian coordinates
u = np.vstack([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]) u = np.vstack(
[np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]
)
# Choose a random rotation angle # Choose a random rotation angle
alpha = np.random.rand(B) * 2 * np.pi alpha = np.random.rand(B) * 2 * np.pi
@ -115,7 +119,9 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None,
points = points.copy() points = points.copy()
for bi, length in enumerate(batches_len): for bi, length in enumerate(batches_len):
# Apply the rotation # Apply the rotation
points[i0:i0 + length, :] = np.sum(np.expand_dims(points[i0:i0 + length, :], 2) * R[bi], axis=1) points[i0 : i0 + length, :] = np.sum(
np.expand_dims(points[i0 : i0 + length, :], 2) * R[bi], axis=1
)
i0 += length i0 += length
####################### #######################
@ -123,61 +129,73 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None,
####################### #######################
if (features is None) and (labels is None): if (features is None) and (labels is None):
s_points, s_len = cpp_subsampling.subsample_batch(points, s_points, s_len = cpp_subsampling.subsample_batch(
batches_len, points, batches_len, sampleDl=sampleDl, max_p=max_p, verbose=verbose
sampleDl=sampleDl, )
max_p=max_p,
verbose=verbose)
if random_grid_orient: if random_grid_orient:
i0 = 0 i0 = 0
for bi, length in enumerate(s_len): for bi, length in enumerate(s_len):
s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1) s_points[i0 : i0 + length, :] = np.sum(
np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1
)
i0 += length i0 += length
return s_points, s_len return s_points, s_len
elif (labels is None): elif labels is None:
s_points, s_len, s_features = cpp_subsampling.subsample_batch(points, s_points, s_len, s_features = cpp_subsampling.subsample_batch(
batches_len, points,
features=features, batches_len,
sampleDl=sampleDl, features=features,
max_p=max_p, sampleDl=sampleDl,
verbose=verbose) max_p=max_p,
verbose=verbose,
)
if random_grid_orient: if random_grid_orient:
i0 = 0 i0 = 0
for bi, length in enumerate(s_len): for bi, length in enumerate(s_len):
# Apply the rotation # Apply the rotation
s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1) s_points[i0 : i0 + length, :] = np.sum(
np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1
)
i0 += length i0 += length
return s_points, s_len, s_features return s_points, s_len, s_features
elif (features is None): elif features is None:
s_points, s_len, s_labels = cpp_subsampling.subsample_batch(points, s_points, s_len, s_labels = cpp_subsampling.subsample_batch(
batches_len, points,
classes=labels, batches_len,
sampleDl=sampleDl, classes=labels,
max_p=max_p, sampleDl=sampleDl,
verbose=verbose) max_p=max_p,
verbose=verbose,
)
if random_grid_orient: if random_grid_orient:
i0 = 0 i0 = 0
for bi, length in enumerate(s_len): for bi, length in enumerate(s_len):
# Apply the rotation # Apply the rotation
s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1) s_points[i0 : i0 + length, :] = np.sum(
np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1
)
i0 += length i0 += length
return s_points, s_len, s_labels return s_points, s_len, s_labels
else: else:
s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(points, s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(
batches_len, points,
features=features, batches_len,
classes=labels, features=features,
sampleDl=sampleDl, classes=labels,
max_p=max_p, sampleDl=sampleDl,
verbose=verbose) max_p=max_p,
verbose=verbose,
)
if random_grid_orient: if random_grid_orient:
i0 = 0 i0 = 0
for bi, length in enumerate(s_len): for bi, length in enumerate(s_len):
# Apply the rotation # Apply the rotation
s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1) s_points[i0 : i0 + length, :] = np.sum(
np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1
)
i0 += length i0 += length
return s_points, s_len, s_features, s_labels return s_points, s_len, s_features, s_labels
@@ -193,7 +211,9 @@ def batch_neighbors(queries, supports, q_batches, s_batches, radius):
     :return: neighbors indices
     """
 
-    return cpp_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=radius)
+    return cpp_neighbors.batch_query(
+        queries, supports, q_batches, s_batches, radius=radius
+    )
 
 
 # ----------------------------------------------------------------------------------------------------------------------
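
Note: batch_neighbors is the radius-search counterpart of the wrappers above, operating on stacked clouds whose per-cloud lengths are passed separately. A small illustrative call (shapes, dtypes and the 5 cm radius are invented; the real callers pass stacked layer points and stack_lengths, as in classification_inputs further down):

    import numpy as np

    queries = np.random.rand(300, 3).astype(np.float32)   # two query clouds, stacked
    supports = np.random.rand(500, 3).astype(np.float32)  # two support clouds, stacked
    q_batches = np.array([100, 200], dtype=np.int32)      # lengths of the query clouds
    s_batches = np.array([200, 300], dtype=np.int32)      # lengths of the support clouds

    # One row of neighbor indices per query point; oversized rows are later cropped
    # by big_neighborhood_filter, and out-of-range indices act as padding.
    neighbors = batch_neighbors(queries, supports, q_batches, s_batches, 0.05)
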
@@ -211,7 +231,7 @@ class PointCloudDataset(Dataset):
         """
 
         self.name = name
-        self.path = ''
+        self.path = ""
         self.label_to_names = {}
         self.num_classes = 0
         self.label_values = np.zeros((0,), dtype=np.int32)
@ -237,7 +257,6 @@ class PointCloudDataset(Dataset):
return 0 return 0
def init_labels(self): def init_labels(self):
# Initialize all label parameters given the label_to_names dict # Initialize all label parameters given the label_to_names dict
self.num_classes = len(self.label_to_names) self.num_classes = len(self.label_to_names)
self.label_values = np.sort([k for k, v in self.label_to_names.items()]) self.label_values = np.sort([k for k, v in self.label_to_names.items()])
@ -256,27 +275,33 @@ class PointCloudDataset(Dataset):
R = np.eye(points.shape[1]) R = np.eye(points.shape[1])
if points.shape[1] == 3: if points.shape[1] == 3:
if self.config.augment_rotation == 'vertical': if self.config.augment_rotation == "vertical":
# Create random rotations # Create random rotations
theta = np.random.rand() * 2 * np.pi theta = np.random.rand() * 2 * np.pi
c, s = np.cos(theta), np.sin(theta) c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32) R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
elif self.config.augment_rotation == 'all': elif self.config.augment_rotation == "all":
# Choose two random angles for the first vector in polar coordinates # Choose two random angles for the first vector in polar coordinates
theta = np.random.rand() * 2 * np.pi theta = np.random.rand() * 2 * np.pi
phi = (np.random.rand() - 0.5) * np.pi phi = (np.random.rand() - 0.5) * np.pi
# Create the first vector in carthesian coordinates # Create the first vector in carthesian coordinates
u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]) u = np.array(
[
np.cos(theta) * np.cos(phi),
np.sin(theta) * np.cos(phi),
np.sin(phi),
]
)
# Choose a random rotation angle # Choose a random rotation angle
alpha = np.random.rand() * 2 * np.pi alpha = np.random.rand() * 2 * np.pi
# Create the rotation matrix with this vector and angle # Create the rotation matrix with this vector and angle
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0] R = create_3D_rotations(
np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1))
)[0]
R = R.astype(np.float32) R = R.astype(np.float32)
@@ -301,17 +326,19 @@ class PointCloudDataset(Dataset):
         # Noise
         #######
 
-        noise = (np.random.randn(points.shape[0], points.shape[1]) * self.config.augment_noise).astype(np.float32)
+        noise = (
+            np.random.randn(points.shape[0], points.shape[1])
+            * self.config.augment_noise
+        ).astype(np.float32)
 
         ##################
         # Apply transforms
         ##################
 
         # Do not use np.dot because it is multi-threaded
-        #augmented_points = np.dot(points, R) * scale + noise
+        # augmented_points = np.dot(points, R) * scale + noise
         augmented_points = np.sum(np.expand_dims(points, 2) * R, axis=1) * scale + noise
 
         if normals is None:
             return augmented_points, scale, R
         else:
@@ -319,12 +346,14 @@ class PointCloudDataset(Dataset):
             normal_scale = scale[[1, 2, 0]] * scale[[2, 0, 1]]
             augmented_normals = np.dot(normals, R) * normal_scale
 
             # Renormalise
-            augmented_normals *= 1 / (np.linalg.norm(augmented_normals, axis=1, keepdims=True) + 1e-6)
+            augmented_normals *= 1 / (
+                np.linalg.norm(augmented_normals, axis=1, keepdims=True) + 1e-6
+            )
 
             if verbose:
                 test_p = [np.vstack([points, augmented_points])]
                 test_n = [np.vstack([normals, augmented_normals])]
-                test_l = [np.hstack([points[:, 2]*0, augmented_points[:, 2]*0+1])]
+                test_l = [np.hstack([points[:, 2] * 0, augmented_points[:, 2] * 0 + 1])]
                 show_ModelNet_examples(test_p, test_n, test_l)
 
             return augmented_points, augmented_normals, scale, R
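
Note: the broadcast-and-sum expression used above, np.sum(np.expand_dims(points, 2) * R, axis=1), computes the same result as points @ R while avoiding the multi-threaded np.dot path that the comment warns about. A quick self-contained check of that equivalence (synthetic data, illustrative only; the R below matches the "vertical" branch shown earlier in this diff):

    import numpy as np

    rng = np.random.default_rng(0)
    points = rng.standard_normal((1000, 3)).astype(np.float32)
    theta = rng.random() * 2 * np.pi
    c, s = np.cos(theta), np.sin(theta)
    R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)

    rotated_broadcast = np.sum(np.expand_dims(points, 2) * R, axis=1)  # form used in the diff
    rotated_matmul = points @ R                                        # what np.dot would give
    assert np.allclose(rotated_broadcast, rotated_matmul, atol=1e-5)
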
@ -337,16 +366,13 @@ class PointCloudDataset(Dataset):
# crop neighbors matrix # crop neighbors matrix
if len(self.neighborhood_limits) > 0: if len(self.neighborhood_limits) > 0:
return neighbors[:, :self.neighborhood_limits[layer]] return neighbors[:, : self.neighborhood_limits[layer]]
else: else:
return neighbors return neighbors
def classification_inputs(self, def classification_inputs(
stacked_points, self, stacked_points, stacked_features, labels, stack_lengths
stacked_features, ):
labels,
stack_lengths):
# Starting radius of convolutions # Starting radius of convolutions
r_normal = self.config.first_subsampling_dl * self.config.conv_radius r_normal = self.config.first_subsampling_dl * self.config.conv_radius
@ -367,9 +393,13 @@ class PointCloudDataset(Dataset):
arch = self.config.architecture arch = self.config.architecture
for block_i, block in enumerate(arch): for block_i, block in enumerate(arch):
# Get all blocks of the layer # Get all blocks of the layer
if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block): if not (
"pool" in block
or "strided" in block
or "global" in block
or "upsample" in block
):
layer_blocks += [block] layer_blocks += [block]
continue continue
@ -379,12 +409,14 @@ class PointCloudDataset(Dataset):
deform_layer = False deform_layer = False
if layer_blocks: if layer_blocks:
# Convolutions are done in this layer, compute the neighbors with the good radius # Convolutions are done in this layer, compute the neighbors with the good radius
if np.any(['deformable' in blck for blck in layer_blocks]): if np.any(["deformable" in blck for blck in layer_blocks]):
r = r_normal * self.config.deform_radius / self.config.conv_radius r = r_normal * self.config.deform_radius / self.config.conv_radius
deform_layer = True deform_layer = True
else: else:
r = r_normal r = r_normal
conv_i = batch_neighbors(stacked_points, stacked_points, stack_lengths, stack_lengths, r) conv_i = batch_neighbors(
stacked_points, stacked_points, stack_lengths, stack_lengths, r
)
else: else:
# This layer only perform pooling, no neighbors required # This layer only perform pooling, no neighbors required
@ -394,23 +426,26 @@ class PointCloudDataset(Dataset):
# ************************* # *************************
# If end of layer is a pooling operation # If end of layer is a pooling operation
if 'pool' in block or 'strided' in block: if "pool" in block or "strided" in block:
# New subsampling length # New subsampling length
dl = 2 * r_normal / self.config.conv_radius dl = 2 * r_normal / self.config.conv_radius
# Subsampled points # Subsampled points
pool_p, pool_b = batch_grid_subsampling(stacked_points, stack_lengths, sampleDl=dl) pool_p, pool_b = batch_grid_subsampling(
stacked_points, stack_lengths, sampleDl=dl
)
# Radius of pooled neighbors # Radius of pooled neighbors
if 'deformable' in block: if "deformable" in block:
r = r_normal * self.config.deform_radius / self.config.conv_radius r = r_normal * self.config.deform_radius / self.config.conv_radius
deform_layer = True deform_layer = True
else: else:
r = r_normal r = r_normal
# Subsample indices # Subsample indices
pool_i = batch_neighbors(pool_p, stacked_points, pool_b, stack_lengths, r) pool_i = batch_neighbors(
pool_p, stacked_points, pool_b, stack_lengths, r
)
else: else:
# No pooling in the end of this layer, no pooling indices required # No pooling in the end of this layer, no pooling indices required
@ -438,7 +473,7 @@ class PointCloudDataset(Dataset):
layer_blocks = [] layer_blocks = []
# Stop when meeting a global pooling or upsampling # Stop when meeting a global pooling or upsampling
if 'global' in block or 'upsample' in block: if "global" in block or "upsample" in block:
break break
############### ###############
@ -453,13 +488,9 @@ class PointCloudDataset(Dataset):
return li return li
def segmentation_inputs(
def segmentation_inputs(self, self, stacked_points, stacked_features, labels, stack_lengths
stacked_points, ):
stacked_features,
labels,
stack_lengths):
# Starting radius of convolutions # Starting radius of convolutions
r_normal = self.config.first_subsampling_dl * self.config.conv_radius r_normal = self.config.first_subsampling_dl * self.config.conv_radius
@ -481,9 +512,13 @@ class PointCloudDataset(Dataset):
arch = self.config.architecture arch = self.config.architecture
for block_i, block in enumerate(arch): for block_i, block in enumerate(arch):
# Get all blocks of the layer # Get all blocks of the layer
if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block): if not (
"pool" in block
or "strided" in block
or "global" in block
or "upsample" in block
):
layer_blocks += [block] layer_blocks += [block]
continue continue
@ -493,12 +528,14 @@ class PointCloudDataset(Dataset):
deform_layer = False deform_layer = False
if layer_blocks: if layer_blocks:
# Convolutions are done in this layer, compute the neighbors with the good radius # Convolutions are done in this layer, compute the neighbors with the good radius
if np.any(['deformable' in blck for blck in layer_blocks]): if np.any(["deformable" in blck for blck in layer_blocks]):
r = r_normal * self.config.deform_radius / self.config.conv_radius r = r_normal * self.config.deform_radius / self.config.conv_radius
deform_layer = True deform_layer = True
else: else:
r = r_normal r = r_normal
conv_i = batch_neighbors(stacked_points, stacked_points, stack_lengths, stack_lengths, r) conv_i = batch_neighbors(
stacked_points, stacked_points, stack_lengths, stack_lengths, r
)
else: else:
# This layer only performs pooling, no neighbors required # This layer only performs pooling, no neighbors required
@ -508,26 +545,31 @@ class PointCloudDataset(Dataset):
# ************************* # *************************
# If end of layer is a pooling operation # If end of layer is a pooling operation
if 'pool' in block or 'strided' in block: if "pool" in block or "strided" in block:
# New subsampling length # New subsampling length
dl = 2 * r_normal / self.config.conv_radius dl = 2 * r_normal / self.config.conv_radius
# Subsampled points # Subsampled points
pool_p, pool_b = batch_grid_subsampling(stacked_points, stack_lengths, sampleDl=dl) pool_p, pool_b = batch_grid_subsampling(
stacked_points, stack_lengths, sampleDl=dl
)
# Radius of pooled neighbors # Radius of pooled neighbors
if 'deformable' in block: if "deformable" in block:
r = r_normal * self.config.deform_radius / self.config.conv_radius r = r_normal * self.config.deform_radius / self.config.conv_radius
deform_layer = True deform_layer = True
else: else:
r = r_normal r = r_normal
# Subsample indices # Subsample indices
pool_i = batch_neighbors(pool_p, stacked_points, pool_b, stack_lengths, r) pool_i = batch_neighbors(
pool_p, stacked_points, pool_b, stack_lengths, r
)
# Upsample indices (with the radius of the next layer to keep wanted density) # Upsample indices (with the radius of the next layer to keep wanted density)
up_i = batch_neighbors(stacked_points, pool_p, stack_lengths, pool_b, 2 * r) up_i = batch_neighbors(
stacked_points, pool_p, stack_lengths, pool_b, 2 * r
)
else: else:
# No pooling at the end of this layer, no pooling indices required # No pooling at the end of this layer, no pooling indices required
@ -540,7 +582,7 @@ class PointCloudDataset(Dataset):
conv_i = self.big_neighborhood_filter(conv_i, len(input_points)) conv_i = self.big_neighborhood_filter(conv_i, len(input_points))
pool_i = self.big_neighborhood_filter(pool_i, len(input_points)) pool_i = self.big_neighborhood_filter(pool_i, len(input_points))
if up_i.shape[0] > 0: if up_i.shape[0] > 0:
up_i = self.big_neighborhood_filter(up_i, len(input_points)+1) up_i = self.big_neighborhood_filter(up_i, len(input_points) + 1)
# Updating input lists # Updating input lists
input_points += [stacked_points] input_points += [stacked_points]
@ -559,7 +601,7 @@ class PointCloudDataset(Dataset):
layer_blocks = [] layer_blocks = []
# Stop when meeting a global pooling or upsampling # Stop when meeting a global pooling or upsampling
if 'global' in block or 'upsample' in block: if "global" in block or "upsample" in block:
break break
############### ###############
@ -567,20 +609,13 @@ class PointCloudDataset(Dataset):
############### ###############
# list of network inputs # list of network inputs
li = input_points + input_neighbors + input_pools + input_upsamples + input_stack_lengths li = (
input_points
+ input_neighbors
+ input_pools
+ input_upsamples
+ input_stack_lengths
)
li += [stacked_features, labels] li += [stacked_features, labels]
return li return li
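The input-preparation methods above build one set of neighbor, pooling and upsampling indices per layer, with the subsampling grid and the convolution radius growing geometrically. A minimal standalone sketch of that schedule (not part of this commit; the starting values and the doubling of r_normal after every strided block are assumptions based on the usual KPConv configs):

# Illustrative only: per-layer grid size and radii implied by the code above.
def layer_schedule(first_subsampling_dl=0.04, conv_radius=2.5, deform_radius=6.0, num_layers=5):
    r_normal = first_subsampling_dl * conv_radius
    rows = []
    for layer in range(num_layers):
        dl = 2 * r_normal / conv_radius                      # sampleDl passed to batch_grid_subsampling
        r_deform = r_normal * deform_radius / conv_radius    # radius used when a block is deformable
        rows.append((layer, round(dl, 4), round(r_normal, 4), round(r_deform, 4)))
        r_normal *= 2                                        # next layer works on a coarser grid (assumption)
    return rows

for layer, dl, r, r_def in layer_schedule():
    print(f"layer {layer}: dl={dl}  conv radius={r}  deform radius={r_def}")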

View file

@ -23,10 +23,8 @@
# Import numpy package and name it "np" # Import numpy package and name it "np"
import time
import numpy as np import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from matplotlib import cm
from os import makedirs from os import makedirs
from os.path import join, exists from os.path import join, exists
@ -41,6 +39,7 @@ from utils.config import bcolors
# #
# #
def create_3D_rotations(axis, angle): def create_3D_rotations(axis, angle):
""" """
Create rotation matrices from a list of axes and angles. Code from wikipedia on quaternions Create rotation matrices from a list of axes and angles. Code from wikipedia on quaternions
@ -62,21 +61,35 @@ def create_3D_rotations(axis, angle):
t19 = t2 * axis[:, 1] * axis[:, 2] t19 = t2 * axis[:, 1] * axis[:, 2]
t20 = t8 * axis[:, 0] t20 = t8 * axis[:, 0]
t24 = axis[:, 2] * axis[:, 2] t24 = axis[:, 2] * axis[:, 2]
R = np.stack([t1 + t2 * t3, R = np.stack(
t7 - t9, [
t11 + t12, t1 + t2 * t3,
t7 + t9, t7 - t9,
t1 + t2 * t15, t11 + t12,
t19 - t20, t7 + t9,
t11 - t12, t1 + t2 * t15,
t19 + t20, t19 - t20,
t1 + t2 * t24], axis=1) t11 - t12,
t19 + t20,
t1 + t2 * t24,
],
axis=1,
)
return np.reshape(R, (-1, 3, 3)) return np.reshape(R, (-1, 3, 3))
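As a quick sanity check on create_3D_rotations (not part of this commit): the stacked matrices should be proper rotations, and each one should leave its own axis fixed. A minimal sketch, assuming the function is importable from kernels.kernel_points at the repository root:

import numpy as np
from kernels.kernel_points import create_3D_rotations

rng = np.random.default_rng(0)
axis = rng.standard_normal((4, 3))
axis /= np.linalg.norm(axis, axis=1, keepdims=True)   # unit rotation axes
angle = rng.uniform(0, 2 * np.pi, 4)

R = create_3D_rotations(axis, angle)                  # shape (4, 3, 3)
assert np.allclose(R @ np.transpose(R, (0, 2, 1)), np.eye(3), atol=1e-6)   # orthonormal
assert np.allclose(np.linalg.det(R), 1.0, atol=1e-6)                       # determinant +1
assert np.allclose(np.einsum("nij,nj->ni", R, axis), axis, atol=1e-6)      # axis stays fixed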
def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximation='monte-carlo', def spherical_Lloyd(
approx_n=5000, max_iter=500, momentum=0.9, verbose=0): radius,
num_cells,
dimension=3,
fixed="center",
approximation="monte-carlo",
approx_n=5000,
max_iter=500,
momentum=0.9,
verbose=0,
):
""" """
Creation of kernel points via the Lloyd algorithm. We use an approximation of the algorithm, and compute the Voronoi Creation of kernel points via the Lloyd algorithm. We use an approximation of the algorithm, and compute the Voronoi
cell centers with discretization of space. The exact formula is not trivial with part of the sphere as sides. cell centers with discretization of space. The exact formula is not trivial with part of the sphere as sides.
@ -109,13 +122,15 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio
new_points = np.random.rand(num_cells, dimension) * 2 * radius0 - radius0 new_points = np.random.rand(num_cells, dimension) * 2 * radius0 - radius0
kernel_points = np.vstack((kernel_points, new_points)) kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1) d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[np.logical_and(d2 < radius0 ** 2, (0.9 * radius0) ** 2 < d2), :] kernel_points = kernel_points[
np.logical_and(d2 < radius0**2, (0.9 * radius0) ** 2 < d2), :
]
kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1)) kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1))
# Optional fixing # Optional fixing
if fixed == 'center': if fixed == "center":
kernel_points[0, :] *= 0 kernel_points[0, :] *= 0
if fixed == 'verticals': if fixed == "verticals":
kernel_points[:3, :] *= 0 kernel_points[:3, :] *= 0
kernel_points[1, -1] += 2 * radius0 / 3 kernel_points[1, -1] += 2 * radius0 / 3
kernel_points[2, -1] -= 2 * radius0 / 3 kernel_points[2, -1] -= 2 * radius0 / 3
@ -129,10 +144,10 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio
fig = plt.figure() fig = plt.figure()
# Initialize discretization if this method is chosen # Initialize discretization if this method is chosen
if approximation == 'discretization': if approximation == "discretization":
side_n = int(np.floor(approx_n ** (1. / dimension))) side_n = int(np.floor(approx_n ** (1.0 / dimension)))
dl = 2 * radius0 / side_n dl = 2 * radius0 / side_n
coords = np.arange(-radius0 + dl/2, radius0, dl) coords = np.arange(-radius0 + dl / 2, radius0, dl)
if dimension == 2: if dimension == 2:
x, y = np.meshgrid(coords, coords) x, y = np.meshgrid(coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y))).T X = np.vstack((np.ravel(x), np.ravel(y))).T
@ -143,11 +158,13 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio
x, y, z, t = np.meshgrid(coords, coords, coords, coords) x, y, z, t = np.meshgrid(coords, coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T
else: else:
raise ValueError('Unsupported dimension (max is 4)') raise ValueError("Unsupported dimension (max is 4)")
elif approximation == 'monte-carlo': elif approximation == "monte-carlo":
X = np.zeros((0, dimension)) X = np.zeros((0, dimension))
else: else:
raise ValueError('Wrong approximation method chosen: "{:s}"'.format(approximation)) raise ValueError(
'Wrong approximation method chosen: "{:s}"'.format(approximation)
)
# Only points inside the sphere are used # Only points inside the sphere are used
d2 = np.sum(np.power(X, 2), axis=1) d2 = np.sum(np.power(X, 2), axis=1)
@ -164,9 +181,8 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio
max_moves = np.zeros((0,)) max_moves = np.zeros((0,))
for iter in range(max_iter): for iter in range(max_iter):
# In the case of monte-carlo, renew the sampled points # In the case of monte-carlo, renew the sampled points
if approximation == 'monte-carlo': if approximation == "monte-carlo":
X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0 X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0
d2 = np.sum(np.power(X, 2), axis=1) d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :] X = X[d2 < radius0 * radius0, :]
@ -179,7 +195,7 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio
cell_inds = np.argmin(sq_distances, axis=1) cell_inds = np.argmin(sq_distances, axis=1)
centers = [] centers = []
for c in range(num_cells): for c in range(num_cells):
bool_c = (cell_inds == c) bool_c = cell_inds == c
num_c = np.sum(bool_c.astype(np.int32)) num_c = np.sum(bool_c.astype(np.int32))
if num_c > 0: if num_c > 0:
centers.append(np.sum(X[bool_c, :], axis=0) / num_c) centers.append(np.sum(X[bool_c, :], axis=0) / num_c)
@ -196,28 +212,42 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio
max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1))) max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1)))
# Optional fixing # Optional fixing
if fixed == 'center': if fixed == "center":
kernel_points[0, :] *= 0 kernel_points[0, :] *= 0
if fixed == 'verticals': if fixed == "verticals":
kernel_points[0, :] *= 0 kernel_points[0, :] *= 0
kernel_points[:3, :-1] *= 0 kernel_points[:3, :-1] *= 0
if verbose: if verbose:
print('iter {:5d} / max move = {:f}'.format(iter, np.max(np.linalg.norm(moves, axis=1)))) print(
"iter {:5d} / max move = {:f}".format(
iter, np.max(np.linalg.norm(moves, axis=1))
)
)
if warning: if warning:
print('{:}WARNING: at least one point has no cell{:}'.format(bcolors.WARNING, bcolors.ENDC)) print(
"{:}WARNING: at least one point has no cell{:}".format(
bcolors.WARNING, bcolors.ENDC
)
)
if verbose > 1: if verbose > 1:
plt.clf() plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0, plt.scatter(
marker='.', cmap=plt.get_cmap('tab20')) X[:, 0],
#plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0, X[:, 1],
c=cell_inds,
s=20.0,
marker=".",
cmap=plt.get_cmap("tab20"),
)
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20')) # marker='+', cmap=plt.get_cmap('tab20'))
plt.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+') plt.plot(kernel_points[:, 0], kernel_points[:, 1], "k+")
circle = plt.Circle((0, 0), radius0, color='r', fill=False) circle = plt.Circle((0, 0), radius0, color="r", fill=False)
fig.axes[0].add_artist(circle) fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1)) fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1)) fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_aspect('equal') fig.axes[0].set_aspect("equal")
plt.draw() plt.draw()
plt.pause(0.001) plt.pause(0.001)
plt.show(block=False) plt.show(block=False)
@ -231,32 +261,45 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio
if dimension == 2: if dimension == 2:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8]) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8])
ax1.plot(max_moves) ax1.plot(max_moves)
ax2.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0, ax2.scatter(
marker='.', cmap=plt.get_cmap('tab20')) X[:, 0],
X[:, 1],
c=cell_inds,
s=20.0,
marker=".",
cmap=plt.get_cmap("tab20"),
)
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0, # plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20')) # marker='+', cmap=plt.get_cmap('tab20'))
ax2.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+') ax2.plot(kernel_points[:, 0], kernel_points[:, 1], "k+")
circle = plt.Circle((0, 0), radius0, color='r', fill=False) circle = plt.Circle((0, 0), radius0, color="r", fill=False)
ax2.add_artist(circle) ax2.add_artist(circle)
ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1)) ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1)) ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_aspect('equal') ax2.set_aspect("equal")
plt.title('Check if kernel is correct.') plt.title("Check if kernel is correct.")
plt.draw() plt.draw()
plt.show() plt.show()
if dimension > 2: if dimension > 2:
plt.figure() plt.figure()
plt.plot(max_moves) plt.plot(max_moves)
plt.title('Check if kernel is correct.') plt.title("Check if kernel is correct.")
plt.show() plt.show()
# Rescale kernels with real radius # Rescale kernels with real radius
return kernel_points * radius return kernel_points * radius
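The loop above is essentially Lloyd's algorithm with a Monte-Carlo estimate of each Voronoi cell center and a momentum term on the updates. A stripped-down 2D sketch of the same idea (not part of the commit; no fixed points, no plotting, and the parameter values are illustrative only):

import numpy as np

def lloyd_sketch(num_cells=15, radius=1.0, approx_n=5000, iters=100, momentum=0.9, seed=0):
    rng = np.random.default_rng(seed)
    kernel_points = rng.uniform(-radius, radius, (num_cells, 2))
    moves = np.zeros_like(kernel_points)
    for _ in range(iters):
        # Fresh Monte-Carlo samples inside the disk
        X = rng.uniform(-radius, radius, (approx_n, 2))
        X = X[np.sum(X ** 2, axis=1) < radius ** 2]
        # Assign each sample to its closest kernel point (Voronoi cell)
        d2 = np.sum((X[:, None, :] - kernel_points[None, :, :]) ** 2, axis=2)
        cell_inds = np.argmin(d2, axis=1)
        # Move each kernel point toward its cell center, with momentum
        for c in range(num_cells):
            in_c = cell_inds == c
            if np.any(in_c):
                center = X[in_c].mean(axis=0)
                moves[c] = momentum * moves[c] + (1 - momentum) * (center - kernel_points[c])
        kernel_points += moves
    return kernel_points

print(lloyd_sketch().shape)   # (15, 2)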
def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension=3, def kernel_point_optimization_debug(
fixed='center', ratio=0.66, verbose=0): radius,
num_points,
num_kernels=1,
dimension=3,
fixed="center",
ratio=0.66,
verbose=0,
):
""" """
Creation of kernel points via optimization of potentials. Creation of kernel points via optimization of potentials.
:param radius: Radius of the kernels :param radius: Radius of the kernels
@ -292,18 +335,25 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension
####################### #######################
# Random kernel points # Random kernel points
kernel_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0 kernel_points = (
while (kernel_points.shape[0] < num_kernels * num_points): np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
new_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0 )
while kernel_points.shape[0] < num_kernels * num_points:
new_points = (
np.random.rand(num_kernels * num_points - 1, dimension) * diameter0
- radius0
)
kernel_points = np.vstack((kernel_points, new_points)) kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1) d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[d2 < 0.5 * radius0 * radius0, :] kernel_points = kernel_points[d2 < 0.5 * radius0 * radius0, :]
kernel_points = kernel_points[:num_kernels * num_points, :].reshape((num_kernels, num_points, -1)) kernel_points = kernel_points[: num_kernels * num_points, :].reshape(
(num_kernels, num_points, -1)
)
# Optional fixing # Optional fixing
if fixed == 'center': if fixed == "center":
kernel_points[:, 0, :] *= 0 kernel_points[:, 0, :] *= 0
if fixed == 'verticals': if fixed == "verticals":
kernel_points[:, :3, :] *= 0 kernel_points[:, :3, :] *= 0
kernel_points[:, 1, -1] += 2 * radius0 / 3 kernel_points[:, 1, -1] += 2 * radius0 / 3
kernel_points[:, 2, -1] -= 2 * radius0 / 3 kernel_points[:, 2, -1] -= 2 * radius0 / 3
@ -313,14 +363,13 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension
##################### #####################
# Initialize figure # Initialize figure
if verbose>1: if verbose > 1:
fig = plt.figure() fig = plt.figure()
saved_gradient_norms = np.zeros((10000, num_kernels)) saved_gradient_norms = np.zeros((10000, num_kernels))
old_gradient_norms = np.zeros((num_kernels, num_points)) old_gradient_norms = np.zeros((num_kernels, num_points))
step = -1 step = -1
while step < 10000: while step < 10000:
# Increment # Increment
step += 1 step += 1
@ -331,16 +380,16 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension
A = np.expand_dims(kernel_points, axis=2) A = np.expand_dims(kernel_points, axis=2)
B = np.expand_dims(kernel_points, axis=1) B = np.expand_dims(kernel_points, axis=1)
interd2 = np.sum(np.power(A - B, 2), axis=-1) interd2 = np.sum(np.power(A - B, 2), axis=-1)
inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3/2) + 1e-6) inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3 / 2) + 1e-6)
inter_grads = np.sum(inter_grads, axis=1) inter_grads = np.sum(inter_grads, axis=1)
# Derivative of the radius potential # Derivative of the radius potential
circle_grads = 10*kernel_points circle_grads = 10 * kernel_points
# All gradients # All gradients
gradients = inter_grads + circle_grads gradients = inter_grads + circle_grads
if fixed == 'verticals': if fixed == "verticals":
gradients[:, 1:3, :-1] = 0 gradients[:, 1:3, :-1] = 0
# Stop condition # Stop condition
@ -352,9 +401,17 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension
# Stop if all moving points are gradients fixed (low gradients diff) # Stop if all moving points are gradients fixed (low gradients diff)
if fixed == 'center' and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:])) < thresh: if (
fixed == "center"
and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:]))
< thresh
):
break break
elif fixed == 'verticals' and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:])) < thresh: elif (
fixed == "verticals"
and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:]))
< thresh
):
break break
elif np.max(np.abs(old_gradient_norms - gradients_norms)) < thresh: elif np.max(np.abs(old_gradient_norms - gradients_norms)) < thresh:
break break
@ -367,24 +424,32 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension
moving_dists = np.minimum(moving_factor * gradients_norms, clip) moving_dists = np.minimum(moving_factor * gradients_norms, clip)
# Fix central point # Fix central point
if fixed == 'center': if fixed == "center":
moving_dists[:, 0] = 0 moving_dists[:, 0] = 0
if fixed == 'verticals': if fixed == "verticals":
moving_dists[:, 0] = 0 moving_dists[:, 0] = 0
# Move points # Move points
kernel_points -= np.expand_dims(moving_dists, -1) * gradients / np.expand_dims(gradients_norms + 1e-6, -1) kernel_points -= (
np.expand_dims(moving_dists, -1)
* gradients
/ np.expand_dims(gradients_norms + 1e-6, -1)
)
if verbose: if verbose:
print('step {:5d} / max grad = {:f}'.format(step, np.max(gradients_norms[:, 3:]))) print(
"step {:5d} / max grad = {:f}".format(
step, np.max(gradients_norms[:, 3:])
)
)
if verbose > 1: if verbose > 1:
plt.clf() plt.clf()
plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], '.') plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], ".")
circle = plt.Circle((0, 0), radius, color='r', fill=False) circle = plt.Circle((0, 0), radius, color="r", fill=False)
fig.axes[0].add_artist(circle) fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius*1.1, radius*1.1)) fig.axes[0].set_xlim((-radius * 1.1, radius * 1.1))
fig.axes[0].set_ylim((-radius*1.1, radius*1.1)) fig.axes[0].set_ylim((-radius * 1.1, radius * 1.1))
fig.axes[0].set_aspect('equal') fig.axes[0].set_aspect("equal")
plt.draw() plt.draw()
plt.pause(0.001) plt.pause(0.001)
plt.show(block=False) plt.show(block=False)
@ -395,7 +460,7 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension
# Remove unused lines in the saved gradients # Remove unused lines in the saved gradients
if step < 10000: if step < 10000:
saved_gradient_norms = saved_gradient_norms[:step+1, :] saved_gradient_norms = saved_gradient_norms[: step + 1, :]
# Rescale radius to fit the wanted ratio of radius # Rescale radius to fit the wanted ratio of radius
r = np.sqrt(np.sum(np.power(kernel_points, 2), axis=-1)) r = np.sqrt(np.sum(np.power(kernel_points, 2), axis=-1))
@ -406,9 +471,8 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension
def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False): def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
# Kernel directory # Kernel directory
kernel_dir = 'kernels/dispositions' kernel_dir = "kernels/dispositions"
if not exists(kernel_dir): if not exists(kernel_dir):
makedirs(kernel_dir) makedirs(kernel_dir)
@ -417,26 +481,28 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
lloyd = True lloyd = True
# Kernel_file # Kernel_file
kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension)) kernel_file = join(
kernel_dir, "k_{:03d}_{:s}_{:d}D.ply".format(num_kpoints, fixed, dimension)
)
# Check if already done # Check if already done
if not exists(kernel_file): if not exists(kernel_file):
if lloyd: if lloyd:
# Create kernels # Create kernels
kernel_points = spherical_Lloyd(1.0, kernel_points = spherical_Lloyd(
num_kpoints, 1.0, num_kpoints, dimension=dimension, fixed=fixed, verbose=0
dimension=dimension, )
fixed=fixed,
verbose=0)
else: else:
# Create kernels # Create kernels
kernel_points, grad_norms = kernel_point_optimization_debug(1.0, kernel_points, grad_norms = kernel_point_optimization_debug(
num_kpoints, 1.0,
num_kernels=100, num_kpoints,
dimension=dimension, num_kernels=100,
fixed=fixed, dimension=dimension,
verbose=0) fixed=fixed,
verbose=0,
)
# Find best candidate # Find best candidate
best_k = np.argmin(grad_norms[-1, :]) best_k = np.argmin(grad_norms[-1, :])
@ -444,23 +510,23 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
# Save points # Save points
kernel_points = kernel_points[best_k, :, :] kernel_points = kernel_points[best_k, :, :]
write_ply(kernel_file, kernel_points, ['x', 'y', 'z']) write_ply(kernel_file, kernel_points, ["x", "y", "z"])
else: else:
data = read_ply(kernel_file) data = read_ply(kernel_file)
kernel_points = np.vstack((data['x'], data['y'], data['z'])).T kernel_points = np.vstack((data["x"], data["y"], data["z"])).T
# Random rotations for the kernel # Random rotations for the kernel
# N.B. 4D random rotations not supported yet # N.B. 4D random rotations not supported yet
R = np.eye(dimension) R = np.eye(dimension)
theta = np.random.rand() * 2 * np.pi theta = np.random.rand() * 2 * np.pi
if dimension == 2: if dimension == 2:
if fixed != 'vertical': if fixed != "vertical":
c, s = np.cos(theta), np.sin(theta) c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s], [s, c]], dtype=np.float32) R = np.array([[c, -s], [s, c]], dtype=np.float32)
elif dimension == 3: elif dimension == 3:
if fixed != 'vertical': if fixed != "vertical":
c, s = np.cos(theta), np.sin(theta) c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32) R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
@ -468,18 +534,24 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
phi = (np.random.rand() - 0.5) * np.pi phi = (np.random.rand() - 0.5) * np.pi
# Create the first vector in Cartesian coordinates # Create the first vector in Cartesian coordinates
u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]) u = np.array(
[np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]
)
# Choose a random rotation angle # Choose a random rotation angle
alpha = np.random.rand() * 2 * np.pi alpha = np.random.rand() * 2 * np.pi
# Create the rotation matrix with this vector and angle # Create the rotation matrix with this vector and angle
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0] R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[
0
]
R = R.astype(np.float32) R = R.astype(np.float32)
# Add a small noise # Add a small noise
kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape) kernel_points = kernel_points + np.random.normal(
scale=0.01, size=kernel_points.shape
)
# Scale kernels # Scale kernels
kernel_points = radius * kernel_points kernel_points = radius * kernel_points
@ -487,4 +559,4 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
# Rotate kernels # Rotate kernels
kernel_points = np.matmul(kernel_points, R) kernel_points = np.matmul(kernel_points, R)
return kernel_points.astype(np.float32) return kernel_points.astype(np.float32)
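Hypothetical usage of load_kernels (not part of the commit): on the first call it optimizes a kernel disposition and caches it as a .ply under kernels/dispositions; later calls reload, jitter, rescale and randomly rotate it, so it should be run from the repository root:

from kernels.kernel_points import load_kernels

K = load_kernels(radius=1.2, num_kpoints=15, dimension=3, fixed="center", lloyd=False)
print(K.shape, K.dtype)   # expected: (15, 3) float32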

View file

@ -19,20 +19,17 @@ import numpy as np
def p2p_fitting_regularizer(net): def p2p_fitting_regularizer(net):
fitting_loss = 0 fitting_loss = 0
repulsive_loss = 0 repulsive_loss = 0
for m in net.modules(): for m in net.modules():
if isinstance(m, KPConv) and m.deformable: if isinstance(m, KPConv) and m.deformable:
############## ##############
# Fitting loss # Fitting loss
############## ##############
# Get the distance to closest input point and normalize to be independent of layers # Get the distance to closest input point and normalize to be independent of layers
KP_min_d2 = m.min_d2 / (m.KP_extent ** 2) KP_min_d2 = m.min_d2 / (m.KP_extent**2)
# Loss will be the square distance to closest input point. We use L1 because dist is already squared # Loss will be the square distance to closest input point. We use L1 because dist is already squared
fitting_loss += net.l1(KP_min_d2, torch.zeros_like(KP_min_d2)) fitting_loss += net.l1(KP_min_d2, torch.zeros_like(KP_min_d2))
@ -46,9 +43,15 @@ def p2p_fitting_regularizer(net):
# Points should not be too close to each other # Points should not be too close to each other
for i in range(net.K): for i in range(net.K):
other_KP = torch.cat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], dim=1).detach() other_KP = torch.cat(
distances = torch.sqrt(torch.sum((other_KP - KP_locs[:, i:i + 1, :]) ** 2, dim=2)) [KP_locs[:, :i, :], KP_locs[:, i + 1 :, :]], dim=1
rep_loss = torch.sum(torch.clamp_max(distances - net.repulse_extent, max=0.0) ** 2, dim=1) ).detach()
distances = torch.sqrt(
torch.sum((other_KP - KP_locs[:, i : i + 1, :]) ** 2, dim=2)
)
rep_loss = torch.sum(
torch.clamp_max(distances - net.repulse_extent, max=0.0) ** 2, dim=1
)
repulsive_loss += net.l1(rep_loss, torch.zeros_like(rep_loss)) / net.K repulsive_loss += net.l1(rep_loss, torch.zeros_like(rep_loss)) / net.K
return net.deform_fitting_power * (2 * fitting_loss + repulsive_loss) return net.deform_fitting_power * (2 * fitting_loss + repulsive_loss)
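The regularizer above combines a fitting term (minimum squared distance of each kernel point to the input, normalized by KP_extent**2) with a repulsive term on the deformed kernel points. A standalone sketch of the repulsive part (not part of the commit; the L1-against-zero reduction is written as a plain mean, which is equivalent here since the term is non-negative):

import torch

def repulsive_term(KP_locs, repulse_extent):
    # KP_locs: [n_points, K, dim] deformed kernel point locations
    K = KP_locs.shape[1]
    loss = 0.0
    for i in range(K):
        other_KP = torch.cat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], dim=1)
        distances = torch.sqrt(torch.sum((other_KP - KP_locs[:, i:i + 1, :]) ** 2, dim=2))
        rep = torch.clamp_max(distances - repulse_extent, max=0.0) ** 2   # penalize shortfall below repulse_extent
        loss = loss + torch.sum(rep, dim=1).mean() / K
    return loss

KP = torch.rand(8, 15, 3)   # 8 neighborhoods, 15 kernel points, 3D (illustrative shapes)
print(repulsive_term(KP, repulse_extent=1.2))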
@ -79,36 +82,32 @@ class KPCNN(nn.Module):
# Loop over consecutive blocks # Loop over consecutive blocks
block_in_layer = 0 block_in_layer = 0
for block_i, block in enumerate(config.architecture): for block_i, block in enumerate(config.architecture):
# Check equivariance # Check equivariance
if ('equivariant' in block) and (not out_dim % 3 == 0): if ("equivariant" in block) and (not out_dim % 3 == 0):
raise ValueError('Equivariant block but features dimension is not a factor of 3') raise ValueError(
"Equivariant block but features dimension is not a factor of 3"
)
# Detect upsampling block to stop # Detect upsampling block to stop
if 'upsample' in block: if "upsample" in block:
break break
# Apply the appropriate block function defining the ops # Apply the appropriate block function defining the ops
self.block_ops.append(block_decider(block, self.block_ops.append(
r, block_decider(block, r, in_dim, out_dim, layer, config)
in_dim, )
out_dim,
layer,
config))
# Index of block in this layer # Index of block in this layer
block_in_layer += 1 block_in_layer += 1
# Update dimension of input from output # Update dimension of input from output
if 'simple' in block: if "simple" in block:
in_dim = out_dim // 2 in_dim = out_dim // 2
else: else:
in_dim = out_dim in_dim = out_dim
# Detect change to a subsampled layer # Detect change to a subsampled layer
if 'pool' in block or 'strided' in block: if "pool" in block or "strided" in block:
# Update radius and feature dimension for next layer # Update radius and feature dimension for next layer
layer += 1 layer += 1
r *= 2 r *= 2
@ -134,7 +133,6 @@ class KPCNN(nn.Module):
return return
def forward(self, batch, config): def forward(self, batch, config):
# Save all block operations in a list of modules # Save all block operations in a list of modules
x = batch.features.clone().detach() x = batch.features.clone().detach()
@ -160,12 +158,12 @@ class KPCNN(nn.Module):
self.output_loss = self.criterion(outputs, labels) self.output_loss = self.criterion(outputs, labels)
# Regularization of deformable offsets # Regularization of deformable offsets
if self.deform_fitting_mode == 'point2point': if self.deform_fitting_mode == "point2point":
self.reg_loss = p2p_fitting_regularizer(self) self.reg_loss = p2p_fitting_regularizer(self)
elif self.deform_fitting_mode == 'point2plane': elif self.deform_fitting_mode == "point2plane":
raise ValueError('point2plane fitting mode not implemented yet.') raise ValueError("point2plane fitting mode not implemented yet.")
else: else:
raise ValueError('Unknown fitting mode: ' + self.deform_fitting_mode) raise ValueError("Unknown fitting mode: " + self.deform_fitting_mode)
# Combined loss # Combined loss
return self.output_loss + self.reg_loss return self.output_loss + self.reg_loss
@ -217,36 +215,36 @@ class KPFCNN(nn.Module):
# Loop over consecutive blocks # Loop over consecutive blocks
for block_i, block in enumerate(config.architecture): for block_i, block in enumerate(config.architecture):
# Check equivariance # Check equivariance
if ('equivariant' in block) and (not out_dim % 3 == 0): if ("equivariant" in block) and (not out_dim % 3 == 0):
raise ValueError('Equivariant block but features dimension is not a factor of 3') raise ValueError(
"Equivariant block but features dimension is not a factor of 3"
)
# Detect change to next layer for skip connection # Detect change to next layer for skip connection
if np.any([tmp in block for tmp in ['pool', 'strided', 'upsample', 'global']]): if np.any(
[tmp in block for tmp in ["pool", "strided", "upsample", "global"]]
):
self.encoder_skips.append(block_i) self.encoder_skips.append(block_i)
self.encoder_skip_dims.append(in_dim) self.encoder_skip_dims.append(in_dim)
# Detect upsampling block to stop # Detect upsampling block to stop
if 'upsample' in block: if "upsample" in block:
break break
# Apply the appropriate block function defining the ops # Apply the appropriate block function defining the ops
self.encoder_blocks.append(block_decider(block, self.encoder_blocks.append(
r, block_decider(block, r, in_dim, out_dim, layer, config)
in_dim, )
out_dim,
layer,
config))
# Update dimension of input from output # Update dimension of input from output
if 'simple' in block: if "simple" in block:
in_dim = out_dim // 2 in_dim = out_dim // 2
else: else:
in_dim = out_dim in_dim = out_dim
# Detect change to a subsampled layer # Detect change to a subsampled layer
if 'pool' in block or 'strided' in block: if "pool" in block or "strided" in block:
# Update radius and feature dimension for next layer # Update radius and feature dimension for next layer
layer += 1 layer += 1
r *= 2 r *= 2
@ -263,38 +261,36 @@ class KPFCNN(nn.Module):
# Find first upsampling block # Find first upsampling block
start_i = 0 start_i = 0
for block_i, block in enumerate(config.architecture): for block_i, block in enumerate(config.architecture):
if 'upsample' in block: if "upsample" in block:
start_i = block_i start_i = block_i
break break
# Loop over consecutive blocks # Loop over consecutive blocks
for block_i, block in enumerate(config.architecture[start_i:]): for block_i, block in enumerate(config.architecture[start_i:]):
# Add dimension of skip connection concat # Add dimension of skip connection concat
if block_i > 0 and 'upsample' in config.architecture[start_i + block_i - 1]: if block_i > 0 and "upsample" in config.architecture[start_i + block_i - 1]:
in_dim += self.encoder_skip_dims[layer] in_dim += self.encoder_skip_dims[layer]
self.decoder_concats.append(block_i) self.decoder_concats.append(block_i)
# Apply the appropriate block function defining the ops # Apply the appropriate block function defining the ops
self.decoder_blocks.append(block_decider(block, self.decoder_blocks.append(
r, block_decider(block, r, in_dim, out_dim, layer, config)
in_dim, )
out_dim,
layer,
config))
# Update dimension of input from output # Update dimension of input from output
in_dim = out_dim in_dim = out_dim
# Detect change to a subsampled layer # Detect change to a subsampled layer
if 'upsample' in block: if "upsample" in block:
# Update radius and feature dimension for next layer # Update radius and feature dimension for next layer
layer -= 1 layer -= 1
r *= 0.5 r *= 0.5
out_dim = out_dim // 2 out_dim = out_dim // 2
self.head_mlp = UnaryBlock(out_dim, config.first_features_dim, False, 0) self.head_mlp = UnaryBlock(out_dim, config.first_features_dim, False, 0)
self.head_softmax = UnaryBlock(config.first_features_dim, self.C, False, 0, no_relu=True) self.head_softmax = UnaryBlock(
config.first_features_dim, self.C, False, 0, no_relu=True
)
################ ################
# Network Losses # Network Losses
@ -320,7 +316,6 @@ class KPFCNN(nn.Module):
return return
def forward(self, batch, config): def forward(self, batch, config):
# Get input features # Get input features
x = batch.features.clone().detach() x = batch.features.clone().detach()
@ -351,7 +346,7 @@ class KPFCNN(nn.Module):
""" """
# Set all ignored labels to -1 and correct the other labels to be in the [0, C-1] range # Set all ignored labels to -1 and correct the other labels to be in the [0, C-1] range
target = - torch.ones_like(labels) target = -torch.ones_like(labels)
for i, c in enumerate(self.valid_labels): for i, c in enumerate(self.valid_labels):
target[labels == c] = i target[labels == c] = i
@ -364,12 +359,12 @@ class KPFCNN(nn.Module):
self.output_loss = self.criterion(outputs, target) self.output_loss = self.criterion(outputs, target)
# Regularization of deformable offsets # Regularization of deformable offsets
if self.deform_fitting_mode == 'point2point': if self.deform_fitting_mode == "point2point":
self.reg_loss = p2p_fitting_regularizer(self) self.reg_loss = p2p_fitting_regularizer(self)
elif self.deform_fitting_mode == 'point2plane': elif self.deform_fitting_mode == "point2plane":
raise ValueError('point2plane fitting mode not implemented yet.') raise ValueError("point2plane fitting mode not implemented yet.")
else: else:
raise ValueError('Unknown fitting mode: ' + self.deform_fitting_mode) raise ValueError("Unknown fitting mode: " + self.deform_fitting_mode)
# Combined loss # Combined loss
return self.output_loss + self.reg_loss return self.output_loss + self.reg_loss
@ -383,7 +378,7 @@ class KPFCNN(nn.Module):
""" """
# Set all ignored labels to -1 and correct the other label to be in [0, C-1] range # Set all ignored labels to -1 and correct the other label to be in [0, C-1] range
target = - torch.ones_like(labels) target = -torch.ones_like(labels)
for i, c in enumerate(self.valid_labels): for i, c in enumerate(self.valid_labels):
target[labels == c] = i target[labels == c] = i
@ -392,24 +387,3 @@ class KPFCNN(nn.Module):
correct = (predicted == target).sum().item() correct = (predicted == target).sum().item()
return correct / total return correct / total

View file

@ -15,7 +15,6 @@
# #
import time
import math import math
import torch import torch
import torch.nn as nn import torch.nn as nn
@ -23,7 +22,6 @@ from torch.nn.parameter import Parameter
from torch.nn.init import kaiming_uniform_ from torch.nn.init import kaiming_uniform_
from kernels.kernel_points import load_kernels from kernels.kernel_points import load_kernels
from utils.ply import write_ply
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
# #
@ -51,19 +49,19 @@ def gather(x, idx, method=2):
return x.gather(0, idx) return x.gather(0, idx)
elif method == 2: elif method == 2:
for i, ni in enumerate(idx.size()[1:]): for i, ni in enumerate(idx.size()[1:]):
x = x.unsqueeze(i+1) x = x.unsqueeze(i + 1)
new_s = list(x.size()) new_s = list(x.size())
new_s[i+1] = ni new_s[i + 1] = ni
x = x.expand(new_s) x = x.expand(new_s)
n = len(idx.size()) n = len(idx.size())
for i, di in enumerate(x.size()[n:]): for i, di in enumerate(x.size()[n:]):
idx = idx.unsqueeze(i+n) idx = idx.unsqueeze(i + n)
new_s = list(idx.size()) new_s = list(idx.size())
new_s[i+n] = di new_s[i + n] = di
idx = idx.expand(new_s) idx = idx.expand(new_s)
return x.gather(0, idx) return x.gather(0, idx)
else: else:
raise ValueError('Unknown method') raise ValueError("Unknown method")
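Hypothetical usage of the gather() above (not part of the commit): it picks per-neighbor feature rows from a [n_points + 1, d] feature matrix using a [n_points, n_neighbors] index matrix, with the appended zero row acting as the "shadow" neighbor. The import path follows the upstream models/blocks.py layout and is an assumption here:

import torch
from models.blocks import gather   # assumed module path

x = torch.arange(12, dtype=torch.float32).view(4, 3)   # 4 support points, 3 features each
x = torch.cat([x, torch.zeros(1, 3)], dim=0)           # shadow feature row at index 4
idx = torch.tensor([[0, 1, 4],
                    [2, 3, 4]])                        # 2 query points, 3 neighbor indices each

neighb_x = gather(x, idx, method=2)                    # shape [2, 3, 3]
print(neighb_x.shape)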
def radius_gaussian(sq_r, sig, eps=1e-9): def radius_gaussian(sq_r, sig, eps=1e-9):
@ -122,9 +120,8 @@ def global_average(x, batch_lengths):
averaged_features = [] averaged_features = []
i0 = 0 i0 = 0
for b_i, length in enumerate(batch_lengths): for b_i, length in enumerate(batch_lengths):
# Average features for each batch cloud # Average features for each batch cloud
averaged_features.append(torch.mean(x[i0:i0 + length], dim=0)) averaged_features.append(torch.mean(x[i0 : i0 + length], dim=0))
# Increment for next cloud # Increment for next cloud
i0 += length i0 += length
@ -141,10 +138,20 @@ def global_average(x, batch_lengths):
class KPConv(nn.Module): class KPConv(nn.Module):
def __init__(
def __init__(self, kernel_size, p_dim, in_channels, out_channels, KP_extent, radius, self,
fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum', kernel_size,
deformable=False, modulated=False): p_dim,
in_channels,
out_channels,
KP_extent,
radius,
fixed_kernel_points="center",
KP_influence="linear",
aggregation_mode="sum",
deformable=False,
modulated=False,
):
""" """
Initialize parameters for KPConvDeformable. Initialize parameters for KPConvDeformable.
:param kernel_size: Number of kernel points. :param kernel_size: Number of kernel points.
@ -180,8 +187,10 @@ class KPConv(nn.Module):
self.offset_features = None self.offset_features = None
# Initialize weights # Initialize weights
self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32), self.weights = Parameter(
requires_grad=True) torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),
requires_grad=True,
)
# Initiate weights for offsets # Initiate weights for offsets
if deformable: if deformable:
@ -189,16 +198,20 @@ class KPConv(nn.Module):
self.offset_dim = (self.p_dim + 1) * self.K self.offset_dim = (self.p_dim + 1) * self.K
else: else:
self.offset_dim = self.p_dim * self.K self.offset_dim = self.p_dim * self.K
self.offset_conv = KPConv(self.K, self.offset_conv = KPConv(
self.p_dim, self.K,
self.in_channels, self.p_dim,
self.offset_dim, self.in_channels,
KP_extent, self.offset_dim,
radius, KP_extent,
fixed_kernel_points=fixed_kernel_points, radius,
KP_influence=KP_influence, fixed_kernel_points=fixed_kernel_points,
aggregation_mode=aggregation_mode) KP_influence=KP_influence,
self.offset_bias = Parameter(torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True) aggregation_mode=aggregation_mode,
)
self.offset_bias = Parameter(
torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True
)
else: else:
self.offset_dim = None self.offset_dim = None
@ -226,36 +239,36 @@ class KPConv(nn.Module):
""" """
# Create one kernel disposition (as numpy array). Choose the KP distance to center based on the KP extent # Create one kernel disposition (as numpy array). Choose the KP distance to center based on the KP extent
K_points_numpy = load_kernels(self.radius, K_points_numpy = load_kernels(
self.K, self.radius, self.K, dimension=self.p_dim, fixed=self.fixed_kernel_points
dimension=self.p_dim, )
fixed=self.fixed_kernel_points)
return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32), return Parameter(
requires_grad=False) torch.tensor(K_points_numpy, dtype=torch.float32), requires_grad=False
)
def forward(self, q_pts, s_pts, neighb_inds, x): def forward(self, q_pts, s_pts, neighb_inds, x):
################### ###################
# Offset generation # Offset generation
################### ###################
if self.deformable: if self.deformable:
# Get offsets with a KPConv that only takes part of the features # Get offsets with a KPConv that only takes part of the features
self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias self.offset_features = (
self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias
)
if self.modulated: if self.modulated:
# Get offset (in normalized scale) from features # Get offset (in normalized scale) from features
unscaled_offsets = self.offset_features[:, :self.p_dim * self.K] unscaled_offsets = self.offset_features[:, : self.p_dim * self.K]
unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim) unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim)
# Get modulations # Get modulations
modulations = 2 * torch.sigmoid(self.offset_features[:, self.p_dim * self.K:]) modulations = 2 * torch.sigmoid(
self.offset_features[:, self.p_dim * self.K :]
)
else: else:
# Get offset (in normalized scale) from features # Get offset (in normalized scale) from features
unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim) unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim)
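A small standalone sketch of the split performed just above (not part of the commit): in the modulated case the offset KPConv produces (p_dim + 1) * K features per point, where the first p_dim * K are raw offsets and the last K are squashed into (0, 2) as modulations:

import torch

n_points, K, p_dim = 5, 15, 3                                            # illustrative sizes
offset_features = torch.randn(n_points, (p_dim + 1) * K)

unscaled_offsets = offset_features[:, : p_dim * K].view(-1, K, p_dim)    # [n_points, K, p_dim]
modulations = 2 * torch.sigmoid(offset_features[:, p_dim * K:])          # [n_points, K], in (0, 2)
print(unscaled_offsets.shape, modulations.shape)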
@ -294,22 +307,25 @@ class KPConv(nn.Module):
differences = neighbors - deformed_K_points differences = neighbors - deformed_K_points
# Get the square distances [n_points, n_neighbors, n_kpoints] # Get the square distances [n_points, n_neighbors, n_kpoints]
sq_distances = torch.sum(differences ** 2, dim=3) sq_distances = torch.sum(differences**2, dim=3)
# Optimization by ignoring points outside a deformed KP range # Optimization by ignoring points outside a deformed KP range
if self.deformable: if self.deformable:
# Save distances for loss # Save distances for loss
self.min_d2, _ = torch.min(sq_distances, dim=1) self.min_d2, _ = torch.min(sq_distances, dim=1)
# Boolean of the neighbors in range of a kernel point [n_points, n_neighbors] # Boolean of the neighbors in range of a kernel point [n_points, n_neighbors]
in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32) in_range = torch.any(sq_distances < self.KP_extent**2, dim=2).type(
torch.int32
)
# New value of max neighbors # New value of max neighbors
new_max_neighb = torch.max(torch.sum(in_range, dim=1)) new_max_neighb = torch.max(torch.sum(in_range, dim=1))
# For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb] # For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb]
neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1) neighb_row_bool, neighb_row_inds = torch.topk(
in_range, new_max_neighb.item(), dim=1
)
# Gather new neighbor indices [n_points, new_max_neighb] # Gather new neighbor indices [n_points, new_max_neighb]
new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False) new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False)
@ -321,35 +337,41 @@ class KPConv(nn.Module):
# New shadow neighbors have to point to the last shadow point # New shadow neighbors have to point to the last shadow point
new_neighb_inds *= neighb_row_bool new_neighb_inds *= neighb_row_bool
new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1) new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(
s_pts.shape[0] - 1
)
else: else:
new_neighb_inds = neighb_inds new_neighb_inds = neighb_inds
# Get Kernel point influences [n_points, n_kpoints, n_neighbors] # Get Kernel point influences [n_points, n_kpoints, n_neighbors]
if self.KP_influence == 'constant': if self.KP_influence == "constant":
# Every point gets an influence of 1. # Every point gets an influence of 1.
all_weights = torch.ones_like(sq_distances) all_weights = torch.ones_like(sq_distances)
all_weights = torch.transpose(all_weights, 1, 2) all_weights = torch.transpose(all_weights, 1, 2)
elif self.KP_influence == 'linear': elif self.KP_influence == "linear":
# Influence decreases linearly with the distance, and goes to zero when d = KP_extent. # Influence decreases linearly with the distance, and goes to zero when d = KP_extent.
all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0) all_weights = torch.clamp(
1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0
)
all_weights = torch.transpose(all_weights, 1, 2) all_weights = torch.transpose(all_weights, 1, 2)
elif self.KP_influence == 'gaussian': elif self.KP_influence == "gaussian":
# Influence is a gaussian of the distance. # Influence is a gaussian of the distance.
sigma = self.KP_extent * 0.3 sigma = self.KP_extent * 0.3
all_weights = radius_gaussian(sq_distances, sigma) all_weights = radius_gaussian(sq_distances, sigma)
all_weights = torch.transpose(all_weights, 1, 2) all_weights = torch.transpose(all_weights, 1, 2)
else: else:
raise ValueError('Unknown influence function type (config.KP_influence)') raise ValueError("Unknown influence function type (config.KP_influence)")
# In case of closest mode, only the closest KP can influence each point # In case of closest mode, only the closest KP can influence each point
if self.aggregation_mode == 'closest': if self.aggregation_mode == "closest":
neighbors_1nn = torch.argmin(sq_distances, dim=2) neighbors_1nn = torch.argmin(sq_distances, dim=2)
all_weights *= torch.transpose(nn.functional.one_hot(neighbors_1nn, self.K), 1, 2) all_weights *= torch.transpose(
nn.functional.one_hot(neighbors_1nn, self.K), 1, 2
)
elif self.aggregation_mode != 'sum': elif self.aggregation_mode != "sum":
raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'") raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
# Add a zero feature for shadow neighbors # Add a zero feature for shadow neighbors
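A toy sketch of the "linear" influence computed a few lines up (not part of the commit): weights fall from 1 at a kernel point to 0 at KP_extent, and the tensor is then transposed to [n_points, n_kpoints, n_neighbors]; shapes and values are illustrative only:

import torch

KP_extent = 1.2
sq_distances = torch.rand(4, 6, 15) * (2 * KP_extent) ** 2   # [n_points, n_neighbors, n_kpoints]

all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / KP_extent, min=0.0)
all_weights = torch.transpose(all_weights, 1, 2)             # [n_points, n_kpoints, n_neighbors]
print(all_weights.shape, float(all_weights.max()), float(all_weights.min()))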
@ -373,9 +395,10 @@ class KPConv(nn.Module):
return torch.sum(kernel_outputs, dim=0) return torch.sum(kernel_outputs, dim=0)
def __repr__(self): def __repr__(self):
return 'KPConv(radius: {:.2f}, in_feat: {:d}, out_feat: {:d})'.format(self.radius, return "KPConv(radius: {:.2f}, in_feat: {:d}, out_feat: {:d})".format(
self.in_channels, self.radius, self.in_channels, self.out_channels
self.out_channels) )
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
# #
@ -383,51 +406,55 @@ class KPConv(nn.Module):
# \********************/ # \********************/
# #
def block_decider(block_name,
radius,
in_dim,
out_dim,
layer_ind,
config):
if block_name == 'unary': def block_decider(block_name, radius, in_dim, out_dim, layer_ind, config):
return UnaryBlock(in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum) if block_name == "unary":
return UnaryBlock(
in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum
)
elif block_name in ['simple', elif block_name in [
'simple_deformable', "simple",
'simple_invariant', "simple_deformable",
'simple_equivariant', "simple_invariant",
'simple_strided', "simple_equivariant",
'simple_deformable_strided', "simple_strided",
'simple_invariant_strided', "simple_deformable_strided",
'simple_equivariant_strided']: "simple_invariant_strided",
"simple_equivariant_strided",
]:
return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config) return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
elif block_name in ['resnetb', elif block_name in [
'resnetb_invariant', "resnetb",
'resnetb_equivariant', "resnetb_invariant",
'resnetb_deformable', "resnetb_equivariant",
'resnetb_strided', "resnetb_deformable",
'resnetb_deformable_strided', "resnetb_strided",
'resnetb_equivariant_strided', "resnetb_deformable_strided",
'resnetb_invariant_strided']: "resnetb_equivariant_strided",
return ResnetBottleneckBlock(block_name, in_dim, out_dim, radius, layer_ind, config) "resnetb_invariant_strided",
]:
return ResnetBottleneckBlock(
block_name, in_dim, out_dim, radius, layer_ind, config
)
elif block_name == 'max_pool' or block_name == 'max_pool_wide': elif block_name == "max_pool" or block_name == "max_pool_wide":
return MaxPoolBlock(layer_ind) return MaxPoolBlock(layer_ind)
elif block_name == 'global_average': elif block_name == "global_average":
return GlobalAverageBlock() return GlobalAverageBlock()
elif block_name == 'nearest_upsample': elif block_name == "nearest_upsample":
return NearestUpsampleBlock(layer_ind) return NearestUpsampleBlock(layer_ind)
else: else:
raise ValueError('Unknown block name in the architecture definition : ' + block_name) raise ValueError(
"Unknown block name in the architecture definition : " + block_name
)
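The names handled by block_decider are exactly the strings the encoder and decoder loops iterate over. A toy walk over such an architecture list (not part of the commit; the starting radius and widths, and the doubling of out_dim at strided blocks, are assumptions based on the usual KPConv configs):

architecture = [
    "simple", "resnetb", "resnetb_strided",
    "resnetb", "resnetb_strided",
    "resnetb_deformable", "global_average",
]

r, in_dim, out_dim, layer = 0.1, 5, 64, 0
for block in architecture:
    print(f"layer {layer}: {block:22s} r={r:.2f}  in={in_dim}  out={out_dim}")
    in_dim = out_dim // 2 if "simple" in block else out_dim
    if "pool" in block or "strided" in block:
        layer += 1
        r *= 2          # coarser layer, bigger radius (as in the loops above)
        out_dim *= 2    # assumed: feature width doubles with each subsampling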
class BatchNormBlock(nn.Module): class BatchNormBlock(nn.Module):
def __init__(self, in_dim, use_bn, bn_momentum): def __init__(self, in_dim, use_bn, bn_momentum):
""" """
Initialize a batch normalization block. If network does not use batch normalization, replace with biases. Initialize a batch normalization block. If network does not use batch normalization, replace with biases.
@ -441,9 +468,11 @@ class BatchNormBlock(nn.Module):
self.in_dim = in_dim self.in_dim = in_dim
if self.use_bn: if self.use_bn:
self.batch_norm = nn.BatchNorm1d(in_dim, momentum=bn_momentum) self.batch_norm = nn.BatchNorm1d(in_dim, momentum=bn_momentum)
#self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum) # self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
else: else:
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True) self.bias = Parameter(
torch.zeros(in_dim, dtype=torch.float32), requires_grad=True
)
return return
def reset_parameters(self): def reset_parameters(self):
@ -451,7 +480,6 @@ class BatchNormBlock(nn.Module):
def forward(self, x): def forward(self, x):
if self.use_bn: if self.use_bn:
x = x.unsqueeze(2) x = x.unsqueeze(2)
x = x.transpose(0, 2) x = x.transpose(0, 2)
x = self.batch_norm(x) x = self.batch_norm(x)
@ -461,13 +489,14 @@ class BatchNormBlock(nn.Module):
return x + self.bias return x + self.bias
def __repr__(self): def __repr__(self):
return 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'.format(self.in_dim, return (
self.bn_momentum, "BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})".format(
str(not self.use_bn)) self.in_dim, self.bn_momentum, str(not self.use_bn)
)
)
class UnaryBlock(nn.Module): class UnaryBlock(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False): def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
""" """
Initialize a standard unary block with its ReLU and BatchNorm. Initialize a standard unary block with its ReLU and BatchNorm.
@ -497,14 +526,12 @@ class UnaryBlock(nn.Module):
return x return x
def __repr__(self): def __repr__(self):
return 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'.format(self.in_dim, return "UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})".format(
self.out_dim, self.in_dim, self.out_dim, str(self.use_bn), str(not self.no_relu)
str(self.use_bn), )
str(not self.no_relu))
class SimpleBlock(nn.Module): class SimpleBlock(nn.Module):
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config): def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
""" """
Initialize a simple convolution block with its ReLU and BatchNorm. Initialize a simple convolution block with its ReLU and BatchNorm.
@ -527,17 +554,19 @@ class SimpleBlock(nn.Module):
self.out_dim = out_dim self.out_dim = out_dim
# Define the KPConv class # Define the KPConv class
self.KPConv = KPConv(config.num_kernel_points, self.KPConv = KPConv(
config.in_points_dim, config.num_kernel_points,
in_dim, config.in_points_dim,
out_dim // 2, in_dim,
current_extent, out_dim // 2,
radius, current_extent,
fixed_kernel_points=config.fixed_kernel_points, radius,
KP_influence=config.KP_influence, fixed_kernel_points=config.fixed_kernel_points,
aggregation_mode=config.aggregation_mode, KP_influence=config.KP_influence,
deformable='deform' in block_name, aggregation_mode=config.aggregation_mode,
modulated=config.modulated) deformable="deform" in block_name,
modulated=config.modulated,
)
# Other operations # Other operations
self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum) self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum)
@ -546,8 +575,7 @@ class SimpleBlock(nn.Module):
return return
def forward(self, x, batch): def forward(self, x, batch):
if "strided" in self.block_name:
if 'strided' in self.block_name:
q_pts = batch.points[self.layer_ind + 1] q_pts = batch.points[self.layer_ind + 1]
s_pts = batch.points[self.layer_ind] s_pts = batch.points[self.layer_ind]
neighb_inds = batch.pools[self.layer_ind] neighb_inds = batch.pools[self.layer_ind]
@ -561,7 +589,6 @@ class SimpleBlock(nn.Module):
class ResnetBottleneckBlock(nn.Module): class ResnetBottleneckBlock(nn.Module):
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config): def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
""" """
Initialize a resnet bottleneck block. Initialize a resnet bottleneck block.
@ -585,30 +612,40 @@ class ResnetBottleneckBlock(nn.Module):
# First downscaling mlp # First downscaling mlp
if in_dim != out_dim // 4: if in_dim != out_dim // 4:
self.unary1 = UnaryBlock(in_dim, out_dim // 4, self.use_bn, self.bn_momentum) self.unary1 = UnaryBlock(
in_dim, out_dim // 4, self.use_bn, self.bn_momentum
)
else: else:
self.unary1 = nn.Identity() self.unary1 = nn.Identity()
# KPConv block # KPConv block
self.KPConv = KPConv(config.num_kernel_points, self.KPConv = KPConv(
config.in_points_dim, config.num_kernel_points,
out_dim // 4, config.in_points_dim,
out_dim // 4, out_dim // 4,
current_extent, out_dim // 4,
radius, current_extent,
fixed_kernel_points=config.fixed_kernel_points, radius,
KP_influence=config.KP_influence, fixed_kernel_points=config.fixed_kernel_points,
aggregation_mode=config.aggregation_mode, KP_influence=config.KP_influence,
deformable='deform' in block_name, aggregation_mode=config.aggregation_mode,
modulated=config.modulated) deformable="deform" in block_name,
self.batch_norm_conv = BatchNormBlock(out_dim // 4, self.use_bn, self.bn_momentum) modulated=config.modulated,
)
self.batch_norm_conv = BatchNormBlock(
out_dim // 4, self.use_bn, self.bn_momentum
)
# Second upscaling mlp # Second upscaling mlp
self.unary2 = UnaryBlock(out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True) self.unary2 = UnaryBlock(
out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True
)
# Optional shortcut mlp # Optional shortcut mlp
if in_dim != out_dim: if in_dim != out_dim:
self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True) self.unary_shortcut = UnaryBlock(
in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True
)
else: else:
self.unary_shortcut = nn.Identity() self.unary_shortcut = nn.Identity()
@ -618,8 +655,7 @@ class ResnetBottleneckBlock(nn.Module):
return return
def forward(self, features, batch): def forward(self, features, batch):
if "strided" in self.block_name:
if 'strided' in self.block_name:
q_pts = batch.points[self.layer_ind + 1] q_pts = batch.points[self.layer_ind + 1]
s_pts = batch.points[self.layer_ind] s_pts = batch.points[self.layer_ind]
neighb_inds = batch.pools[self.layer_ind] neighb_inds = batch.pools[self.layer_ind]
@ -639,7 +675,7 @@ class ResnetBottleneckBlock(nn.Module):
x = self.unary2(x) x = self.unary2(x)
# Shortcut # Shortcut
if 'strided' in self.block_name: if "strided" in self.block_name:
shortcut = max_pool(features, neighb_inds) shortcut = max_pool(features, neighb_inds)
else: else:
shortcut = features shortcut = features
@ -649,7 +685,6 @@ class ResnetBottleneckBlock(nn.Module):
class GlobalAverageBlock(nn.Module): class GlobalAverageBlock(nn.Module):
def __init__(self): def __init__(self):
""" """
Initialize a global average block with its ReLU and BatchNorm. Initialize a global average block with its ReLU and BatchNorm.
@ -662,7 +697,6 @@ class GlobalAverageBlock(nn.Module):
class NearestUpsampleBlock(nn.Module): class NearestUpsampleBlock(nn.Module):
def __init__(self, layer_ind): def __init__(self, layer_ind):
""" """
Initialize a nearest upsampling block with its ReLU and BatchNorm. Initialize a nearest upsampling block with its ReLU and BatchNorm.
@ -675,12 +709,12 @@ class NearestUpsampleBlock(nn.Module):
return closest_pool(x, batch.upsamples[self.layer_ind - 1]) return closest_pool(x, batch.upsamples[self.layer_ind - 1])
def __repr__(self): def __repr__(self):
return 'NearestUpsampleBlock(layer: {:d} -> {:d})'.format(self.layer_ind, return "NearestUpsampleBlock(layer: {:d} -> {:d})".format(
self.layer_ind - 1) self.layer_ind, self.layer_ind - 1
)
class MaxPoolBlock(nn.Module): class MaxPoolBlock(nn.Module):
def __init__(self, layer_ind): def __init__(self, layer_ind):
""" """
Initialize a max pooling block with its ReLU and BatchNorm. Initialize a max pooling block with its ReLU and BatchNorm.
@ -691,4 +725,3 @@ class MaxPoolBlock(nn.Module):
def forward(self, x, batch): def forward(self, x, batch):
return max_pool(x, batch.pools[self.layer_ind + 1]) return max_pool(x, batch.pools[self.layer_ind + 1])
View file
@ -22,14 +22,11 @@
# #
# Common libs # Common libs
import os
import torch import torch
import numpy as np import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from os.path import isfile, join, exists from os.path import isfile, join, exists
from os import listdir, remove, getcwd from os import listdir, remove
from sklearn.metrics import confusion_matrix
import time
# My libs # My libs
from utils.config import Config from utils.config import Config
@ -37,7 +34,6 @@ from utils.metrics import IoU_from_confusions, smooth_metrics, fast_confusion
from utils.ply import read_ply from utils.ply import read_ply
# Datasets # Datasets
from datasetss.ModelNet40 import ModelNet40Dataset
from datasetss.S3DIS import S3DISDataset from datasetss.S3DIS import S3DISDataset
from datasetss.SemanticKitti import SemanticKittiDataset from datasetss.SemanticKitti import SemanticKittiDataset
@ -47,8 +43,8 @@ from datasetss.SemanticKitti import SemanticKittiDataset
# \***********************/ # \***********************/
# #
def listdir_str(path):
def listdir_str(path):
# listdir can return binary string instead of decoded string sometimes. # This function ensures a steady behavior
# This function ensures a steady behavior # This function ensures a steady behavior
@ -63,41 +59,39 @@ def listdir_str(path):
return f_list return f_list
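A minimal sketch of the decoding idiom such a helper typically relies on, assuming os.listdir may yield bytes entries on some platforms (the elided body above may differ):

    f_list = [f.decode("utf-8") if isinstance(f, (bytes, bytearray)) else f for f in listdir(path)]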
def running_mean(signal, n, axis=0, stride=1): def running_mean(signal, n, axis=0, stride=1):
signal = np.array(signal) signal = np.array(signal)
torch_conv = torch.nn.Conv1d(1, 1, kernel_size=2*n+1, stride=stride, bias=False) torch_conv = torch.nn.Conv1d(1, 1, kernel_size=2 * n + 1, stride=stride, bias=False)
torch_conv.weight.requires_grad_(False) torch_conv.weight.requires_grad_(False)
torch_conv.weight *= 0 torch_conv.weight *= 0
torch_conv.weight += 1 / (2*n+1) torch_conv.weight += 1 / (2 * n + 1)
if signal.ndim == 1: if signal.ndim == 1:
torch_signal = torch.from_numpy(signal.reshape([1, 1, -1]).astype(np.float32)) torch_signal = torch.from_numpy(signal.reshape([1, 1, -1]).astype(np.float32))
return torch_conv(torch_signal).squeeze().numpy() return torch_conv(torch_signal).squeeze().numpy()
elif signal.ndim == 2: elif signal.ndim == 2:
print('TODO implement with torch and stride here') print("TODO implement with torch and stride here")
smoothed = np.empty(signal.shape) smoothed = np.empty(signal.shape)
if axis == 0: if axis == 0:
for i, sig in enumerate(signal): for i, sig in enumerate(signal):
sig_sum = np.convolve(sig, np.ones((2*n+1,)), mode='same') sig_sum = np.convolve(sig, np.ones((2 * n + 1,)), mode="same")
sig_num = np.convolve(sig*0+1, np.ones((2*n+1,)), mode='same') sig_num = np.convolve(sig * 0 + 1, np.ones((2 * n + 1,)), mode="same")
smoothed[i, :] = sig_sum / sig_num smoothed[i, :] = sig_sum / sig_num
elif axis == 1: elif axis == 1:
for i, sig in enumerate(signal.T): for i, sig in enumerate(signal.T):
sig_sum = np.convolve(sig, np.ones((2*n+1,)), mode='same') sig_sum = np.convolve(sig, np.ones((2 * n + 1,)), mode="same")
sig_num = np.convolve(sig*0+1, np.ones((2*n+1,)), mode='same') sig_num = np.convolve(sig * 0 + 1, np.ones((2 * n + 1,)), mode="same")
smoothed[:, i] = sig_sum / sig_num smoothed[:, i] = sig_sum / sig_num
else: else:
print('wrong axis') print("wrong axis")
return smoothed return smoothed
else: else:
print('wrong dimensions') print("wrong dimensions")
return None return None
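Illustrative usage of running_mean, assuming a plain 1D signal:

    raw = np.random.rand(1000)          # e.g. a noisy per-step loss curve
    smoothed = running_mean(raw, n=10)  # centered window of 2 * n + 1 = 21 samples
    # Note: the Conv1d above is created without padding, so the 1D output is
    # 2 * n samples shorter than the input.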
def IoU_class_metrics(all_IoUs, smooth_n): def IoU_class_metrics(all_IoUs, smooth_n):
# Get mean IoU per class for consecutive epochs to directly get a mean without further smoothing # Get mean IoU per class for consecutive epochs to directly get a mean without further smoothing
smoothed_IoUs = [] smoothed_IoUs = []
for epoch in range(len(all_IoUs)): for epoch in range(len(all_IoUs)):
@ -111,8 +105,7 @@ def IoU_class_metrics(all_IoUs, smooth_n):
def load_confusions(filename, n_class): def load_confusions(filename, n_class):
with open(filename, "r") as f:
with open(filename, 'r') as f:
lines = f.readlines() lines = f.readlines()
confs = np.zeros((len(lines), n_class, n_class)) confs = np.zeros((len(lines), n_class, n_class))
@ -124,9 +117,8 @@ def load_confusions(filename, n_class):
def load_training_results(path): def load_training_results(path):
filename = join(path, "training.txt")
filename = join(path, 'training.txt') with open(filename, "r") as f:
with open(filename, 'r') as f:
lines = f.readlines() lines = f.readlines()
epochs = [] epochs = []
@ -137,7 +129,7 @@ def load_training_results(path):
t = [] t = []
for line in lines[1:]: for line in lines[1:]:
line_info = line.split() line_info = line.split()
if (len(line) > 0): if len(line) > 0:
epochs += [int(line_info[0])] epochs += [int(line_info[0])]
steps += [int(line_info[1])] steps += [int(line_info[1])]
L_out += [float(line_info[2])] L_out += [float(line_info[2])]
@ -151,8 +143,7 @@ def load_training_results(path):
def load_single_IoU(filename, n_parts): def load_single_IoU(filename, n_parts):
with open(filename, "r") as f:
with open(filename, 'r') as f:
lines = f.readlines() lines = f.readlines()
# Load all IoUs # Load all IoUs
@ -163,37 +154,42 @@ def load_single_IoU(filename, n_parts):
def load_snap_clouds(path, dataset, only_last=False): def load_snap_clouds(path, dataset, only_last=False):
cloud_folders = np.array(
cloud_folders = np.array([join(path, f) for f in listdir_str(path) if f.startswith('val_preds')]) [join(path, f) for f in listdir_str(path) if f.startswith("val_preds")]
cloud_epochs = np.array([int(f.split('_')[-1]) for f in cloud_folders]) )
cloud_epochs = np.array([int(f.split("_")[-1]) for f in cloud_folders])
epoch_order = np.argsort(cloud_epochs) epoch_order = np.argsort(cloud_epochs)
cloud_epochs = cloud_epochs[epoch_order] cloud_epochs = cloud_epochs[epoch_order]
cloud_folders = cloud_folders[epoch_order] cloud_folders = cloud_folders[epoch_order]
Confs = np.zeros((len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32) Confs = np.zeros(
(len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32
)
for c_i, cloud_folder in enumerate(cloud_folders): for c_i, cloud_folder in enumerate(cloud_folders):
if only_last and c_i < len(cloud_epochs) - 1: if only_last and c_i < len(cloud_epochs) - 1:
continue continue
# Load confusion if previously saved # Load confusion if previously saved
conf_file = join(cloud_folder, 'conf.txt') conf_file = join(cloud_folder, "conf.txt")
if isfile(conf_file): if isfile(conf_file):
Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32) Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32)
else: else:
for f in listdir_str(cloud_folder): for f in listdir_str(cloud_folder):
if f.endswith('.ply') and not f.endswith('sub.ply'): if f.endswith(".ply") and not f.endswith("sub.ply"):
data = read_ply(join(cloud_folder, f)) data = read_ply(join(cloud_folder, f))
labels = data['class'] labels = data["class"]
preds = data['preds'] preds = data["preds"]
Confs[c_i] += fast_confusion(labels, preds, dataset.label_values).astype(np.int32) Confs[c_i] += fast_confusion(
labels, preds, dataset.label_values
).astype(np.int32)
np.savetxt(conf_file, Confs[c_i], '%12d') np.savetxt(conf_file, Confs[c_i], "%12d")
# Erase ply to save disk memory # Erase ply to save disk memory
if c_i < len(cloud_folders) - 1: if c_i < len(cloud_folders) - 1:
for f in listdir_str(cloud_folder): for f in listdir_str(cloud_folder):
if f.endswith('.ply'): if f.endswith(".ply"):
remove(join(cloud_folder, f)) remove(join(cloud_folder, f))
# Remove ignored labels from confusions # Remove ignored labels from confusions
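The confusions accumulated above are later turned into per-class IoUs. For reference, the standard formula, assuming the usual convention C[i, j] = points of true class i predicted as class j (the repository's IoU_from_confusions may differ in its details):

    def iou_from_confusion(C):
        # Works for a single (n_class, n_class) matrix or a stack of them.
        TP = np.diagonal(C, axis1=-2, axis2=-1)
        FP = np.sum(C, axis=-2) - TP  # column sums minus diagonal
        FN = np.sum(C, axis=-1) - TP  # row sums minus diagonal
        return TP / np.maximum(TP + FP + FN, 1e-6)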
@ -213,7 +209,6 @@ def load_snap_clouds(path, dataset, only_last=False):
def compare_trainings(list_of_paths, list_of_labels=None): def compare_trainings(list_of_paths, list_of_labels=None):
# Parameters # Parameters
# ********** # **********
@ -231,13 +226,13 @@ def compare_trainings(list_of_paths, list_of_labels=None):
all_loss = [] all_loss = []
all_lr = [] all_lr = []
all_times = [] all_times = []
all_RAMs = []
for path in list_of_paths: for path in list_of_paths:
print(path) print(path)
if ('val_IoUs.txt' in [f for f in listdir_str(path)]) or ('val_confs.txt' in [f for f in listdir_str(path)]): if ("val_IoUs.txt" in [f for f in listdir_str(path)]) or (
"val_confs.txt" in [f for f in listdir_str(path)]
):
config = Config() config = Config()
config.load(path) config.load(path)
else: else:
@ -278,59 +273,58 @@ def compare_trainings(list_of_paths, list_of_labels=None):
# Plots learning rate # Plots learning rate
# ******************* # *******************
if plot_lr: if plot_lr:
# Figure # Figure
fig = plt.figure('lr') fig = plt.figure("lr")
for i, label in enumerate(list_of_labels): for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_lr[i], linewidth=1, label=label) plt.plot(all_epochs[i], all_lr[i], linewidth=1, label=label)
# Set names for axes # Set names for axes
plt.xlabel('epochs') plt.xlabel("epochs")
plt.ylabel('lr') plt.ylabel("lr")
plt.yscale('log') plt.yscale("log")
# Display legends and title # Display legends and title
plt.legend(loc=1) plt.legend(loc=1)
# Customize the graph # Customize the graph
ax = fig.gca() ax = fig.gca()
ax.grid(linestyle='-.', which='both') ax.grid(linestyle="-.", which="both")
# ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Plots loss # Plots loss
# ********** # **********
# Figure # Figure
fig = plt.figure('loss') fig = plt.figure("loss")
for i, label in enumerate(list_of_labels): for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_loss[i], linewidth=1, label=label) plt.plot(all_epochs[i], all_loss[i], linewidth=1, label=label)
# Set names for axes # Set names for axes
plt.xlabel('epochs') plt.xlabel("epochs")
plt.ylabel('loss') plt.ylabel("loss")
plt.yscale('log') plt.yscale("log")
# Display legends and title # Display legends and title
plt.legend(loc=1) plt.legend(loc=1)
plt.title('Losses compare') plt.title("Losses compare")
# Customize the graph # Customize the graph
ax = fig.gca() ax = fig.gca()
ax.grid(linestyle='-.', which='both') ax.grid(linestyle="-.", which="both")
# ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Plot Times # Plot Times
# ********** # **********
# Figure # Figure
fig = plt.figure('time') fig = plt.figure("time")
for i, label in enumerate(list_of_labels): for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], np.array(all_times[i]) / 3600, linewidth=1, label=label) plt.plot(all_epochs[i], np.array(all_times[i]) / 3600, linewidth=1, label=label)
# Set names for axes # Set names for axes
plt.xlabel('epochs') plt.xlabel("epochs")
plt.ylabel('time') plt.ylabel("time")
# plt.yscale('log') # plt.yscale('log')
# Display legends and title # Display legends and title
@ -338,7 +332,7 @@ def compare_trainings(list_of_paths, list_of_labels=None):
# Customize the graph # Customize the graph
ax = fig.gca() ax = fig.gca()
ax.grid(linestyle='-.', which='both') ax.grid(linestyle="-.", which="both")
# ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Show all # Show all
@ -346,7 +340,6 @@ def compare_trainings(list_of_paths, list_of_labels=None):
def compare_convergences_segment(dataset, list_of_paths, list_of_names=None): def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
# Parameters # Parameters
# ********** # **********
@ -368,18 +361,20 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
config = Config() config = Config()
config.load(list_of_paths[0]) config.load(list_of_paths[0])
class_list = [dataset.label_to_names[label] for label in dataset.label_values class_list = [
if label not in dataset.ignored_labels] dataset.label_to_names[label]
for label in dataset.label_values
if label not in dataset.ignored_labels
]
s = '{:^10}|'.format('mean') s = "{:^10}|".format("mean")
for c in class_list: for c in class_list:
s += '{:^10}'.format(c) s += "{:^10}".format(c)
print(s) print(s)
print(10*'-' + '|' + 10*config.num_classes*'-') print(10 * "-" + "|" + 10 * config.num_classes * "-")
for path in list_of_paths: for path in list_of_paths:
# Get validation IoUs # Get validation IoUs
file = join(path, 'val_IoUs.txt') file = join(path, "val_IoUs.txt")
val_IoUs = load_single_IoU(file, config.num_classes) val_IoUs = load_single_IoU(file, config.num_classes)
# Get mean IoU # Get mean IoU
@ -390,9 +385,9 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
all_mIoUs += [mIoUs] all_mIoUs += [mIoUs]
all_class_IoUs += [class_IoUs] all_class_IoUs += [class_IoUs]
s = '{:^10.1f}|'.format(100*mIoUs[-1]) s = "{:^10.1f}|".format(100 * mIoUs[-1])
for IoU in class_IoUs[-1]: for IoU in class_IoUs[-1]:
s += '{:^10.1f}'.format(100*IoU) s += "{:^10.1f}".format(100 * IoU)
print(s) print(s)
# Get optional full validation on clouds # Get optional full validation on clouds
@ -400,73 +395,80 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
all_snap_epochs += [snap_epochs] all_snap_epochs += [snap_epochs]
all_snap_IoUs += [snap_IoUs] all_snap_IoUs += [snap_IoUs]
print(10*'-' + '|' + 10*config.num_classes*'-') print(10 * "-" + "|" + 10 * config.num_classes * "-")
for snap_IoUs in all_snap_IoUs: for snap_IoUs in all_snap_IoUs:
if len(snap_IoUs) > 0: if len(snap_IoUs) > 0:
s = '{:^10.1f}|'.format(100*np.mean(snap_IoUs[-1])) s = "{:^10.1f}|".format(100 * np.mean(snap_IoUs[-1]))
for IoU in snap_IoUs[-1]: for IoU in snap_IoUs[-1]:
s += '{:^10.1f}'.format(100*IoU) s += "{:^10.1f}".format(100 * IoU)
else: else:
s = '{:^10s}'.format('-') s = "{:^10s}".format("-")
for _ in range(config.num_classes): for _ in range(config.num_classes):
s += '{:^10s}'.format('-') s += "{:^10s}".format("-")
print(s) print(s)
# Plots # Plots
# ***** # *****
# Figure # Figure
fig = plt.figure('mIoUs') fig = plt.figure("mIoUs")
for i, name in enumerate(list_of_names): for i, name in enumerate(list_of_names):
p = plt.plot(all_pred_epochs[i], all_mIoUs[i], '--', linewidth=1, label=name) p = plt.plot(all_pred_epochs[i], all_mIoUs[i], "--", linewidth=1, label=name)
plt.plot(all_snap_epochs[i], np.mean(all_snap_IoUs[i], axis=1), linewidth=1, color=p[-1].get_color()) plt.plot(
plt.xlabel('epochs') all_snap_epochs[i],
plt.ylabel('IoU') np.mean(all_snap_IoUs[i], axis=1),
linewidth=1,
color=p[-1].get_color(),
)
plt.xlabel("epochs")
plt.ylabel("IoU")
# Set limits for y axis # Set limits for y axis
#plt.ylim(0.55, 0.95) # plt.ylim(0.55, 0.95)
# Display legends and title # Display legends and title
plt.legend(loc=4) plt.legend(loc=4)
# Customize the graph # Customize the graph
ax = fig.gca() ax = fig.gca()
ax.grid(linestyle='-.', which='both') ax.grid(linestyle="-.", which="both")
#ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # ax.set_yticks(np.arange(0.8, 1.02, 0.02))
displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7] displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7]
displayed_classes = [] displayed_classes = []
for c_i, c_name in enumerate(class_list): for c_i, c_name in enumerate(class_list):
if c_i in displayed_classes: if c_i in displayed_classes:
# Figure # Figure
fig = plt.figure(c_name + ' IoU') fig = plt.figure(c_name + " IoU")
for i, name in enumerate(list_of_names): for i, name in enumerate(list_of_names):
plt.plot(all_pred_epochs[i], all_class_IoUs[i][:, c_i], linewidth=1, label=name) plt.plot(
plt.xlabel('epochs') all_pred_epochs[i],
plt.ylabel('IoU') all_class_IoUs[i][:, c_i],
linewidth=1,
label=name,
)
plt.xlabel("epochs")
plt.ylabel("IoU")
# Set limits for y axis # Set limits for y axis
#plt.ylim(0.8, 1) # plt.ylim(0.8, 1)
# Display legends and title # Display legends and title
plt.legend(loc=4) plt.legend(loc=4)
# Customize the graph # Customize the graph
ax = fig.gca() ax = fig.gca()
ax.grid(linestyle='-.', which='both') ax.grid(linestyle="-.", which="both")
#ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Show all # Show all
plt.show() plt.show()
def compare_convergences_classif(list_of_paths, list_of_labels=None): def compare_convergences_classif(list_of_paths, list_of_labels=None):
# Parameters # Parameters
# ********** # **********
steps_per_epoch = 0
smooth_n = 12 smooth_n = 12
if list_of_labels is None: if list_of_labels is None:
@ -477,13 +479,10 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
all_pred_epochs = [] all_pred_epochs = []
all_val_OA = [] all_val_OA = []
all_train_OA = []
all_vote_OA = [] all_vote_OA = []
all_vote_confs = [] all_vote_confs = []
for path in list_of_paths: for path in list_of_paths:
# Load parameters # Load parameters
config = Config() config = Config()
config.load(list_of_paths[0]) config.load(list_of_paths[0])
@ -496,21 +495,31 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
first_e = np.min(epochs) first_e = np.min(epochs)
# Get validation confusions # Get validation confusions
file = join(path, 'val_confs.txt') file = join(path, "val_confs.txt")
val_C1 = load_confusions(file, n_class) val_C1 = load_confusions(file, n_class)
val_PRE, val_REC, val_F1, val_IoU, val_ACC = smooth_metrics(val_C1, smooth_n=smooth_n) val_PRE, val_REC, val_F1, val_IoU, val_ACC = smooth_metrics(
val_C1, smooth_n=smooth_n
)
# Get vote confusions # Get vote confusions
file = join(path, 'vote_confs.txt') file = join(path, "vote_confs.txt")
if exists(file): if exists(file):
vote_C2 = load_confusions(file, n_class) vote_C2 = load_confusions(file, n_class)
vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = smooth_metrics(vote_C2, smooth_n=2) vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = smooth_metrics(
vote_C2, smooth_n=2
)
else: else:
vote_C2 = val_C1 vote_C2 = val_C1
vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = (val_PRE, val_REC, val_F1, val_IoU, val_ACC) vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = (
val_PRE,
val_REC,
val_F1,
val_IoU,
val_ACC,
)
# Aggregate results # Aggregate results
all_pred_epochs += [np.array([i+first_e for i in range(len(val_ACC))])] all_pred_epochs += [np.array([i + first_e for i in range(len(val_ACC))])]
all_val_OA += [val_ACC] all_val_OA += [val_ACC]
all_vote_OA += [vote_ACC] all_vote_OA += [vote_ACC]
all_vote_confs += [vote_C2] all_vote_confs += [vote_C2]
@ -521,12 +530,15 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
# *********** # ***********
for i, label in enumerate(list_of_labels): for i, label in enumerate(list_of_labels):
print("\n" + label + "\n" + "*" * len(label) + "\n")
print('\n' + label + '\n' + '*' * len(label) + '\n')
print(list_of_paths[i]) print(list_of_paths[i])
best_epoch = np.argmax(all_vote_OA[i]) best_epoch = np.argmax(all_vote_OA[i])
print('Best Accuracy : {:.1f} % (epoch {:d})'.format(100 * all_vote_OA[i][best_epoch], best_epoch)) print(
"Best Accuracy : {:.1f} % (epoch {:d})".format(
100 * all_vote_OA[i][best_epoch], best_epoch
)
)
confs = all_vote_confs[i] confs = all_vote_confs[i]
@ -544,32 +556,31 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
diags = np.diagonal(class_avg_confs, axis1=-2, axis2=-1) diags = np.diagonal(class_avg_confs, axis1=-2, axis2=-1)
class_avg_ACC = np.sum(diags, axis=-1) / np.sum(class_avg_confs, axis=(-1, -2)) class_avg_ACC = np.sum(diags, axis=-1) / np.sum(class_avg_confs, axis=(-1, -2))
print('Corresponding mAcc : {:.1f} %'.format(100 * class_avg_ACC[best_epoch])) print("Corresponding mAcc : {:.1f} %".format(100 * class_avg_ACC[best_epoch]))
# Plots # Plots
# ***** # *****
for fig_name, OA in zip(['Validation', 'Vote'], [all_val_OA, all_vote_OA]): for fig_name, OA in zip(["Validation", "Vote"], [all_val_OA, all_vote_OA]):
# Figure # Figure
fig = plt.figure(fig_name) fig = plt.figure(fig_name)
for i, label in enumerate(list_of_labels): for i, label in enumerate(list_of_labels):
plt.plot(all_pred_epochs[i], OA[i], linewidth=1, label=label) plt.plot(all_pred_epochs[i], OA[i], linewidth=1, label=label)
plt.xlabel('epochs') plt.xlabel("epochs")
plt.ylabel(fig_name + ' Accuracy') plt.ylabel(fig_name + " Accuracy")
# Set limits for y axis # Set limits for y axis
#plt.ylim(0.55, 0.95) # plt.ylim(0.55, 0.95)
# Display legends and title # Display legends and title
plt.legend(loc=4) plt.legend(loc=4)
# Customize the graph # Customize the graph
ax = fig.gca() ax = fig.gca()
ax.grid(linestyle='-.', which='both') ax.grid(linestyle="-.", which="both")
#ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # ax.set_yticks(np.arange(0.8, 1.02, 0.02))
#for i, label in enumerate(list_of_labels): # for i, label in enumerate(list_of_labels):
# print(label, np.max(all_train_OA[i]), np.max(all_val_OA[i])) # print(label, np.max(all_train_OA[i]), np.max(all_val_OA[i]))
# Show all # Show all
@ -577,7 +588,6 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None): def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
# Parameters # Parameters
# ********** # **********
@ -599,23 +609,25 @@ def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
config = Config() config = Config()
config.load(list_of_paths[0]) config.load(list_of_paths[0])
class_list = [dataset.label_to_names[label] for label in dataset.label_values class_list = [
if label not in dataset.ignored_labels] dataset.label_to_names[label]
for label in dataset.label_values
if label not in dataset.ignored_labels
]
s = '{:^6}|'.format('mean') s = "{:^6}|".format("mean")
for c in class_list: for c in class_list:
s += '{:^6}'.format(c[:4]) s += "{:^6}".format(c[:4])
print(s) print(s)
print(6*'-' + '|' + 6*config.num_classes*'-') print(6 * "-" + "|" + 6 * config.num_classes * "-")
for path in list_of_paths: for path in list_of_paths:
# Get validation IoUs # Get validation IoUs
nc_model = dataset.num_classes - len(dataset.ignored_labels) nc_model = dataset.num_classes - len(dataset.ignored_labels)
file = join(path, 'val_IoUs.txt') file = join(path, "val_IoUs.txt")
val_IoUs = load_single_IoU(file, nc_model) val_IoUs = load_single_IoU(file, nc_model)
# Get Subpart IoUs # Get Subpart IoUs
file = join(path, 'subpart_IoUs.txt') file = join(path, "subpart_IoUs.txt")
subpart_IoUs = load_single_IoU(file, nc_model) subpart_IoUs = load_single_IoU(file, nc_model)
# Get mean IoU # Get mean IoU
@ -629,69 +641,75 @@ def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
all_subpart_mIoUs += [subpart_mIoUs] all_subpart_mIoUs += [subpart_mIoUs]
all_subpart_class_IoUs += [subpart_class_IoUs] all_subpart_class_IoUs += [subpart_class_IoUs]
s = '{:^6.1f}|'.format(100*subpart_mIoUs[-1]) s = "{:^6.1f}|".format(100 * subpart_mIoUs[-1])
for IoU in subpart_class_IoUs[-1]: for IoU in subpart_class_IoUs[-1]:
s += '{:^6.1f}'.format(100*IoU) s += "{:^6.1f}".format(100 * IoU)
print(s) print(s)
print(6*'-' + '|' + 6*config.num_classes*'-') print(6 * "-" + "|" + 6 * config.num_classes * "-")
for snap_IoUs in all_val_class_IoUs: for snap_IoUs in all_val_class_IoUs:
if len(snap_IoUs) > 0: if len(snap_IoUs) > 0:
s = '{:^6.1f}|'.format(100*np.mean(snap_IoUs[-1])) s = "{:^6.1f}|".format(100 * np.mean(snap_IoUs[-1]))
for IoU in snap_IoUs[-1]: for IoU in snap_IoUs[-1]:
s += '{:^6.1f}'.format(100*IoU) s += "{:^6.1f}".format(100 * IoU)
else: else:
s = '{:^6s}'.format('-') s = "{:^6s}".format("-")
for _ in range(config.num_classes): for _ in range(config.num_classes):
s += '{:^6s}'.format('-') s += "{:^6s}".format("-")
print(s) print(s)
# Plots # Plots
# ***** # *****
# Figure # Figure
fig = plt.figure('mIoUs') fig = plt.figure("mIoUs")
for i, name in enumerate(list_of_names): for i, name in enumerate(list_of_names):
p = plt.plot(all_pred_epochs[i], all_subpart_mIoUs[i], '--', linewidth=1, label=name) p = plt.plot(
plt.plot(all_pred_epochs[i], all_val_mIoUs[i], linewidth=1, color=p[-1].get_color()) all_pred_epochs[i], all_subpart_mIoUs[i], "--", linewidth=1, label=name
plt.xlabel('epochs') )
plt.ylabel('IoU') plt.plot(
all_pred_epochs[i], all_val_mIoUs[i], linewidth=1, color=p[-1].get_color()
)
plt.xlabel("epochs")
plt.ylabel("IoU")
# Set limits for y axis # Set limits for y axis
#plt.ylim(0.55, 0.95) # plt.ylim(0.55, 0.95)
# Display legends and title # Display legends and title
plt.legend(loc=4) plt.legend(loc=4)
# Customize the graph # Customize the graph
ax = fig.gca() ax = fig.gca()
ax.grid(linestyle='-.', which='both') ax.grid(linestyle="-.", which="both")
#ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # ax.set_yticks(np.arange(0.8, 1.02, 0.02))
displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7] displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7]
#displayed_classes = [] # displayed_classes = []
for c_i, c_name in enumerate(class_list): for c_i, c_name in enumerate(class_list):
if c_i in displayed_classes: if c_i in displayed_classes:
# Figure # Figure
fig = plt.figure(c_name + ' IoU') fig = plt.figure(c_name + " IoU")
for i, name in enumerate(list_of_names): for i, name in enumerate(list_of_names):
plt.plot(all_pred_epochs[i], all_val_class_IoUs[i][:, c_i], linewidth=1, label=name) plt.plot(
plt.xlabel('epochs') all_pred_epochs[i],
plt.ylabel('IoU') all_val_class_IoUs[i][:, c_i],
linewidth=1,
label=name,
)
plt.xlabel("epochs")
plt.ylabel("IoU")
# Set limits for y axis # Set limits for y axis
#plt.ylim(0.8, 1) # plt.ylim(0.8, 1)
# Display legends and title # Display legends and title
plt.legend(loc=4) plt.legend(loc=4)
# Customize the graph # Customize the graph
ax = fig.gca() ax = fig.gca()
ax.grid(linestyle='-.', which='both') ax.grid(linestyle="-.", which="both")
#ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Show all # Show all
plt.show() plt.show()
@ -713,23 +731,22 @@ def experiment_name_1():
""" """
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset. # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
start = 'Log_2020-04-22_11-52-58' start = "Log_2020-04-22_11-52-58"
end = 'Log_2023-07-29_12-40-27' end = "Log_2023-07-29_12-40-27"
# Name of the result path # Name of the result path
res_path = 'results' res_path = "results"
# Gather logs and sort by date # Gather logs and sort by date
logs = np.sort([join(res_path, l) for l in listdir_str(res_path) if start <= l <= end]) logs = np.sort(
[join(res_path, l) for l in listdir_str(res_path) if start <= l <= end]
)
# Give names to the logs (for plot legends) # Give names to the logs (for plot legends)
logs_names = ['name_log_1', logs_names = ["name_log_1", "name_log_2", "name_log_3", "name_log_4"]
'name_log_2',
'name_log_3',
'name_log_4']
# Sanity check on log names # Sanity check on log names
logs_names = np.array(logs_names[:len(logs)]) logs_names = np.array(logs_names[: len(logs)])
return logs, logs_names return logs, logs_names
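The string comparison start <= l <= end above works because the log folder names embed a zero-padded timestamp, so lexicographic order matches chronological order:

    assert "Log_2020-04-22_11-52-58" < "Log_2020-05-22_11-52-58" < "Log_2023-07-29_12-40-27"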
@ -743,27 +760,26 @@ def experiment_name_2():
""" """
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset. # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
start = 'Log_2020-04-22_11-52-58' start = "Log_2020-04-22_11-52-58"
end = 'Log_2020-05-22_11-52-58' end = "Log_2020-05-22_11-52-58"
# Name of the result path # Name of the result path
res_path = 'results' res_path = "results"
# Gather logs and sort by date # Gather logs and sort by date
logs = np.sort([join(res_path, l) for l in listdir_str(res_path) if start <= l <= end]) logs = np.sort(
[join(res_path, l) for l in listdir_str(res_path) if start <= l <= end]
)
# Optionally add a specific log at a specific place in the log list # Optionally add a specific log at a specific place in the log list
logs = logs.astype('<U50') logs = logs.astype("<U50")
logs = np.insert(logs, 0, 'results/Log_2020-04-04_10-04-42') logs = np.insert(logs, 0, "results/Log_2020-04-04_10-04-42")
# Give names to the logs (for plot legends) # Give names to the logs (for plot legends)
logs_names = ['name_log_inserted', logs_names = ["name_log_inserted", "name_log_1", "name_log_2", "name_log_3"]
'name_log_1',
'name_log_2',
'name_log_3']
# Sanity check on log names # Sanity check on log names
logs_names = np.array(logs_names[:len(logs)]) logs_names = np.array(logs_names[: len(logs)])
return logs, logs_names return logs, logs_names
@ -774,8 +790,7 @@ def experiment_name_2():
# \***************/ # \***************/
# #
if __name__ == '__main__': if __name__ == "__main__":
###################################################### ######################################################
# Choose a list of log to plot together for comparison # Choose a list of log to plot together for comparison
###################################################### ######################################################
@ -793,15 +808,15 @@ if __name__ == '__main__':
for log in logs: for log in logs:
config = Config() config = Config()
config.load(log) config.load(log)
if 'ShapeNetPart' in config.dataset: if "ShapeNetPart" in config.dataset:
this_dataset = 'ShapeNetPart' this_dataset = "ShapeNetPart"
else: else:
this_dataset = config.dataset this_dataset = config.dataset
if plot_dataset: if plot_dataset:
if plot_dataset == this_dataset: if plot_dataset == this_dataset:
continue continue
else: else:
raise ValueError('All logs must share the same dataset to be compared') raise ValueError("All logs must share the same dataset to be compared")
else: else:
plot_dataset = this_dataset plot_dataset = this_dataset
@ -809,19 +824,15 @@ if __name__ == '__main__':
compare_trainings(logs, logs_names) compare_trainings(logs, logs_names)
# Plot the validation # Plot the validation
if config.dataset_task == 'classification': if config.dataset_task == "classification":
compare_convergences_classif(logs, logs_names) compare_convergences_classif(logs, logs_names)
elif config.dataset_task == 'cloud_segmentation': elif config.dataset_task == "cloud_segmentation":
if config.dataset.startswith('S3DIS'): if config.dataset.startswith("S3DIS"):
dataset = S3DISDataset(config, load_data=False) dataset = S3DISDataset(config, load_data=False)
compare_convergences_segment(dataset, logs, logs_names) compare_convergences_segment(dataset, logs, logs_names)
elif config.dataset_task == 'slam_segmentation': elif config.dataset_task == "slam_segmentation":
if config.dataset.startswith('SemanticKitti'): if config.dataset.startswith("SemanticKitti"):
dataset = SemanticKittiDataset(config) dataset = SemanticKittiDataset(config)
compare_convergences_SLAM(dataset, logs, logs_names) compare_convergences_SLAM(dataset, logs, logs_names)
else: else:
raise ValueError('Unsupported dataset : ' + plot_dataset) raise ValueError("Unsupported dataset : " + plot_dataset)
View file
@ -22,11 +22,8 @@
# #
# Common libs # Common libs
import signal
import os import os
import numpy as np import numpy as np
import sys
import torch
# Dataset # Dataset
from datasetss.ModelNet40 import * from datasetss.ModelNet40 import *
@ -45,20 +42,25 @@ from models.architectures import KPCNN, KPFCNN
# \***************/ # \***************/
# #
def model_choice(chosen_log):
def model_choice(chosen_log):
########################### ###########################
# Call the test initializer # Call the test initializer
########################### ###########################
# Automatically retrieve the last trained model # Automatically retrieve the last trained model
if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']: if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]:
# Dataset name # Dataset name
test_dataset = '_'.join(chosen_log.split('_')[1:]) test_dataset = "_".join(chosen_log.split("_")[1:])
# List all training logs # List all training logs
logs = np.sort([os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log')]) logs = np.sort(
[
os.path.join("results", f)
for f in os.listdir("results")
if f.startswith("Log")
]
)
# Find the last log of asked dataset # Find the last log of asked dataset
for log in logs[::-1]: for log in logs[::-1]:
@ -68,12 +70,12 @@ def model_choice(chosen_log):
chosen_log = log chosen_log = log
break break
if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']: if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]:
raise ValueError('No log of the dataset "' + test_dataset + '" found') raise ValueError('No log of the dataset "' + test_dataset + '" found')
# Check if log exists # Check if log exists
if not os.path.exists(chosen_log): if not os.path.exists(chosen_log):
raise ValueError('The given log does not exist: ' + chosen_log) raise ValueError("The given log does not exist: " + chosen_log)
return chosen_log return chosen_log
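Illustrative calls (hypothetical values, for orientation only):

    chosen_log = model_choice("last_S3DIS")                        # latest S3DIS log under results/
    chosen_log = model_choice("results/Log_2020-04-04_10-04-42")   # or an explicit log path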
@ -84,8 +86,7 @@ def model_choice(chosen_log):
# \***************/ # \***************/
# #
if __name__ == '__main__': if __name__ == "__main__":
############################### ###############################
# Choose the model to visualize # Choose the model to visualize
############################### ###############################
@ -95,7 +96,7 @@ if __name__ == '__main__':
# > 'last_XXX': Automatically retrieve the last trained model on dataset XXX # > 'last_XXX': Automatically retrieve the last trained model on dataset XXX
# > '(old_)results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model # > '(old_)results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model
chosen_log = 'results/Light_KPFCNN' chosen_log = "results/Light_KPFCNN"
# Choose the index of the checkpoint to load OR None if you want to load the current checkpoint # Choose the index of the checkpoint to load OR None if you want to load the current checkpoint
chkp_idx = -1 chkp_idx = -1
@ -111,25 +112,25 @@ if __name__ == '__main__':
############################ ############################
# Set which gpu is going to be used # Set which gpu is going to be used
GPU_ID = '0' GPU_ID = "0"
# Set GPU visible device # Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
############### ###############
# Previous chkp # Previous chkp
############### ###############
# Find all checkpoints in the chosen training folder # Find all checkpoints in the chosen training folder
chkp_path = os.path.join(chosen_log, 'checkpoints') chkp_path = os.path.join(chosen_log, "checkpoints")
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
# Find which snapshot to restore # Find which snapshot to restore
if chkp_idx is None: if chkp_idx is None:
chosen_chkp = 'current_chkp.tar' chosen_chkp = "current_chkp.tar"
else: else:
chosen_chkp = np.sort(chkps)[chkp_idx] chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join(chosen_log, 'checkpoints', chosen_chkp) chosen_chkp = os.path.join(chosen_log, "checkpoints", chosen_chkp)
# Initialize configuration class # Initialize configuration class
config = Config() config = Config()
@ -141,10 +142,10 @@ if __name__ == '__main__':
# Change parameters for the test here. For example, you can stop augmenting the input data. # Change parameters for the test here. For example, you can stop augmenting the input data.
#config.augment_noise = 0.0001 # config.augment_noise = 0.0001
#config.augment_symmetries = False # config.augment_symmetries = False
#config.batch_num = 3 # config.batch_num = 3
#config.in_radius = 4 # config.in_radius = 4
config.validation_size = 200 config.validation_size = 200
config.input_threads = 10 config.input_threads = 10
@ -153,67 +154,69 @@ if __name__ == '__main__':
############## ##############
print() print()
print('Data Preparation') print("Data Preparation")
print('****************') print("****************")
print(config.dataset) print(config.dataset)
if on_val: if on_val:
set = 'validation' set = "validation"
else: else:
set = 'test' set = "test"
# Initiate dataset # Initiate dataset
if config.dataset == 'ModelNet40': if config.dataset == "ModelNet40":
test_dataset = ModelNet40Dataset(config, train=False) test_dataset = ModelNet40Dataset(config, train=False)
test_sampler = ModelNet40Sampler(test_dataset) test_sampler = ModelNet40Sampler(test_dataset)
collate_fn = ModelNet40Collate collate_fn = ModelNet40Collate
elif config.dataset == 'S3DIS': elif config.dataset == "S3DIS":
test_dataset = S3DISDataset(config, set='validation', use_potentials=True) test_dataset = S3DISDataset(config, set="validation", use_potentials=True)
test_sampler = S3DISSampler(test_dataset) test_sampler = S3DISSampler(test_dataset)
collate_fn = S3DISCollate collate_fn = S3DISCollate
elif config.dataset == 'SemanticKitti': elif config.dataset == "SemanticKitti":
test_dataset = SemanticKittiDataset(config, set=set, balance_classes=False) test_dataset = SemanticKittiDataset(config, set=set, balance_classes=False)
test_sampler = SemanticKittiSampler(test_dataset) test_sampler = SemanticKittiSampler(test_dataset)
collate_fn = SemanticKittiCollate collate_fn = SemanticKittiCollate
else: else:
raise ValueError('Unsupported dataset : ' + config.dataset) raise ValueError("Unsupported dataset : " + config.dataset)
# Data loader # Data loader
test_loader = DataLoader(test_dataset, test_loader = DataLoader(
batch_size=1, test_dataset,
sampler=test_sampler, batch_size=1,
collate_fn=collate_fn, sampler=test_sampler,
num_workers=config.input_threads, collate_fn=collate_fn,
pin_memory=True) num_workers=config.input_threads,
pin_memory=True,
)
# Calibrate samplers # Calibrate samplers
test_sampler.calibration(test_loader, verbose=True) test_sampler.calibration(test_loader, verbose=True)
print('\nModel Preparation') print("\nModel Preparation")
print('*****************') print("*****************")
# Define network model # Define network model
t1 = time.time() t1 = time.time()
if config.dataset_task == 'classification': if config.dataset_task == "classification":
net = KPCNN(config) net = KPCNN(config)
elif config.dataset_task in ['cloud_segmentation', 'slam_segmentation']: elif config.dataset_task in ["cloud_segmentation", "slam_segmentation"]:
net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels) net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels)
else: else:
raise ValueError('Unsupported dataset_task for testing: ' + config.dataset_task) raise ValueError("Unsupported dataset_task for testing: " + config.dataset_task)
# Define a visualizer class # Define a visualizer class
tester = ModelTester(net, chkp_path=chosen_chkp) tester = ModelTester(net, chkp_path=chosen_chkp)
print('Done in {:.1f}s\n'.format(time.time() - t1)) print("Done in {:.1f}s\n".format(time.time() - t1))
print('\nStart test') print("\nStart test")
print('**********\n') print("**********\n")
# Training # Training
if config.dataset_task == 'classification': if config.dataset_task == "classification":
tester.classification_test(net, test_loader, config) tester.classification_test(net, test_loader, config)
elif config.dataset_task == 'cloud_segmentation': elif config.dataset_task == "cloud_segmentation":
tester.cloud_segmentation_test(net, test_loader, config) tester.cloud_segmentation_test(net, test_loader, config)
elif config.dataset_task == 'slam_segmentation': elif config.dataset_task == "slam_segmentation":
tester.slam_segmentation_test(net, test_loader, config) tester.slam_segmentation_test(net, test_loader, config)
else: else:
raise ValueError('Unsupported dataset_task for testing: ' + config.dataset_task) raise ValueError("Unsupported dataset_task for testing: " + config.dataset_task)
View file
@ -26,7 +26,6 @@ import signal
import os import os
import numpy as np import numpy as np
import sys import sys
import torch
# Dataset # Dataset
from datasetss.ModelNet40 import * from datasetss.ModelNet40 import *
@ -43,6 +42,7 @@ from models.architectures import KPCNN
# \******************/ # \******************/
# #
class Modelnet40Config(Config): class Modelnet40Config(Config):
""" """
Override the parameters you want to modify for this dataset Override the parameters you want to modify for this dataset
@ -53,13 +53,13 @@ class Modelnet40Config(Config):
#################### ####################
# Dataset name # Dataset name
dataset = 'ModelNet40' dataset = "ModelNet40"
# Number of classes in the dataset (This value is overwritten by dataset class when initializing dataset). # Number of classes in the dataset (This value is overwritten by dataset class when initializing dataset).
num_classes = None num_classes = None
# Type of task performed on this dataset (also overwritten) # Type of task performed on this dataset (also overwritten)
dataset_task = '' dataset_task = ""
# Number of CPU threads for the input pipeline # Number of CPU threads for the input pipeline
input_threads = 10 input_threads = 10
@ -69,21 +69,23 @@ class Modelnet40Config(Config):
######################### #########################
# Define layers # Define layers
architecture = ['simple', architecture = [
'resnetb', "simple",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'global_average'] "resnetb",
"global_average",
]
################### ###################
# KPConv parameters # KPConv parameters
@ -105,10 +107,10 @@ class Modelnet40Config(Config):
KP_extent = 1.2 KP_extent = 1.2
# Behavior of convolutions in ('constant', 'linear', 'gaussian') # Behavior of convolutions in ('constant', 'linear', 'gaussian')
KP_influence = 'linear' KP_influence = "linear"
# Aggregation function of KPConv in ('closest', 'sum') # Aggregation function of KPConv in ('closest', 'sum')
aggregation_mode = 'sum' aggregation_mode = "sum"
# Choice of input features # Choice of input features
in_features_dim = 1 in_features_dim = 1
@ -123,10 +125,10 @@ class Modelnet40Config(Config):
# Deformable offset loss # Deformable offset loss
# 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2point' fitting geometry by penalizing distance from deform point to input points
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
deform_fitting_mode = 'point2point' deform_fitting_mode = "point2point"
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
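To make the 'point2point' comment above concrete, an illustrative sketch of such a fitting term (a sketch only, not the repository's exact loss code):

    def point2point_fitting(deformed_kp, neighbors):
        # deformed_kp: (K, 3) deformed kernel point positions, neighbors: (N, 3) input points.
        d2 = np.sum((deformed_kp[:, None, :] - neighbors[None, :, :]) ** 2, axis=-1)  # (K, N)
        # Penalize the squared distance from each kernel point to its closest input point.
        return np.mean(np.min(d2, axis=1))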
##################### #####################
# Training parameters # Training parameters
@ -138,7 +140,7 @@ class Modelnet40Config(Config):
# Learning rate management # Learning rate management
learning_rate = 1e-2 learning_rate = 1e-2
momentum = 0.98 momentum = 0.98
lr_decays = {i: 0.1**(1/100) for i in range(1, max_epoch)} lr_decays = {i: 0.1 ** (1 / 100) for i in range(1, max_epoch)}
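The per-epoch factor above compounds to an exact 10x decay every 100 epochs:

    import math
    per_epoch = 0.1 ** (1 / 100)               # ≈ 0.97724
    assert math.isclose(per_epoch ** 100, 0.1)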
grad_clip_norm = 100.0 grad_clip_norm = 100.0
# Number of batch # Number of batch
@ -156,7 +158,7 @@ class Modelnet40Config(Config):
# Augmentations # Augmentations
augment_scale_anisotropic = True augment_scale_anisotropic = True
augment_symmetries = [True, True, True] augment_symmetries = [True, True, True]
augment_rotation = 'none' augment_rotation = "none"
augment_scale_min = 0.8 augment_scale_min = 0.8
augment_scale_max = 1.2 augment_scale_max = 1.2
augment_noise = 0.001 augment_noise = 0.001
@ -166,7 +168,7 @@ class Modelnet40Config(Config):
# > 'none': Each point in the whole batch has the same contribution. # > 'none': Each point in the whole batch has the same contribution.
# > 'class': Each class has the same contribution (points are weighted according to class balance) # > 'class': Each class has the same contribution (points are weighted according to class balance)
# > 'batch': Each cloud in the batch has the same contribution (points are weighted according to cloud sizes) # > 'batch': Each cloud in the batch has the same contribution (points are weighted according to cloud sizes)
segloss_balance = 'none' segloss_balance = "none"
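To make the 'class' option described above concrete, a hypothetical per-point weighting sketch (not the repository's loss code):

    def class_balanced_weights(labels, num_classes):
        counts = np.bincount(labels, minlength=num_classes).astype(np.float32)
        weights = counts.sum() / np.maximum(counts, 1.0)  # rarer classes weigh more
        return weights[labels]                            # one weight per labeled point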
# Do we need to save convergence # Do we need to save convergence
saving = True saving = True
@ -179,40 +181,40 @@ class Modelnet40Config(Config):
# \***************/ # \***************/
# #
if __name__ == '__main__': if __name__ == "__main__":
############################ ############################
# Initialize the environment # Initialize the environment
############################ ############################
# Set which gpu is going to be used # Set which gpu is going to be used
GPU_ID = '0' GPU_ID = "0"
# Set GPU visible device # Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
############### ###############
# Previous chkp # Previous chkp
############### ###############
# Choose here if you want to start training from a previous snapshot (None for new training) # Choose here if you want to start training from a previous snapshot (None for new training)
#previous_training_path = 'Log_2020-03-19_19-53-27' # previous_training_path = 'Log_2020-03-19_19-53-27'
previous_training_path = '' previous_training_path = ""
# Choose index of checkpoint to start from. If None, uses the latest chkp # Choose index of checkpoint to start from. If None, uses the latest chkp
chkp_idx = None chkp_idx = None
if previous_training_path: if previous_training_path:
# Find all snapshot in the chosen training folder # Find all snapshot in the chosen training folder
chkp_path = os.path.join('results', previous_training_path, 'checkpoints') chkp_path = os.path.join("results", previous_training_path, "checkpoints")
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
# Find which snapshot to restore # Find which snapshot to restore
if chkp_idx is None: if chkp_idx is None:
chosen_chkp = 'current_chkp.tar' chosen_chkp = "current_chkp.tar"
else: else:
chosen_chkp = np.sort(chkps)[chkp_idx] chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp) chosen_chkp = os.path.join(
"results", previous_training_path, "checkpoints", chosen_chkp
)
else: else:
chosen_chkp = None chosen_chkp = None
@ -222,13 +224,13 @@ if __name__ == '__main__':
############## ##############
print() print()
print('Data Preparation') print("Data Preparation")
print('****************') print("****************")
# Initialize configuration class # Initialize configuration class
config = Modelnet40Config() config = Modelnet40Config()
if previous_training_path: if previous_training_path:
config.load(os.path.join('results', previous_training_path)) config.load(os.path.join("results", previous_training_path))
config.saving_path = None config.saving_path = None
# Get path from argument if given # Get path from argument if given
@ -244,28 +246,32 @@ if __name__ == '__main__':
test_sampler = ModelNet40Sampler(test_dataset, balance_labels=True) test_sampler = ModelNet40Sampler(test_dataset, balance_labels=True)
# Initialize the dataloader # Initialize the dataloader
training_loader = DataLoader(training_dataset, training_loader = DataLoader(
batch_size=1, training_dataset,
sampler=training_sampler, batch_size=1,
collate_fn=ModelNet40Collate, sampler=training_sampler,
num_workers=config.input_threads, collate_fn=ModelNet40Collate,
pin_memory=True) num_workers=config.input_threads,
test_loader = DataLoader(test_dataset, pin_memory=True,
batch_size=1, )
sampler=test_sampler, test_loader = DataLoader(
collate_fn=ModelNet40Collate, test_dataset,
num_workers=config.input_threads, batch_size=1,
pin_memory=True) sampler=test_sampler,
collate_fn=ModelNet40Collate,
num_workers=config.input_threads,
pin_memory=True,
)
# Calibrate samplers # Calibrate samplers
training_sampler.calibration(training_loader) training_sampler.calibration(training_loader)
test_sampler.calibration(test_loader) test_sampler.calibration(test_loader)
#debug_timing(test_dataset, test_sampler, test_loader) # debug_timing(test_dataset, test_sampler, test_loader)
#debug_show_clouds(training_dataset, training_sampler, training_loader) # debug_show_clouds(training_dataset, training_sampler, training_loader)
print('\nModel Preparation') print("\nModel Preparation")
print('*****************') print("*****************")
# Define network model # Define network model
t1 = time.time() t1 = time.time()
@ -273,20 +279,17 @@ if __name__ == '__main__':
# Define a trainer class # Define a trainer class
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp) trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
print('Done in {:.1f}s\n'.format(time.time() - t1)) print("Done in {:.1f}s\n".format(time.time() - t1))
print('\nStart training') print("\nStart training")
print('**************') print("**************")
# Training # Training
try: try:
trainer.train(net, training_loader, test_loader, config) trainer.train(net, training_loader, test_loader, config)
except: except:
print('Caught an error') print("Caught an error")
os.kill(os.getpid(), signal.SIGINT) os.kill(os.getpid(), signal.SIGINT)
print('Forcing exit now') print("Forcing exit now")
os.kill(os.getpid(), signal.SIGINT) os.kill(os.getpid(), signal.SIGINT)
View file
@ -40,6 +40,7 @@ from models.architectures import KPFCNN
# \******************/ # \******************/
# #
class NPM3DConfig(Config): class NPM3DConfig(Config):
""" """
Override the parameters you want to modify for this dataset Override the parameters you want to modify for this dataset
@ -50,13 +51,13 @@ class NPM3DConfig(Config):
#################### ####################
# Dataset name # Dataset name
dataset = 'NPM3D' dataset = "NPM3D"
# Number of classes in the dataset (This value is overwritten by dataset class when initializing dataset). # Number of classes in the dataset (This value is overwritten by dataset class when initializing dataset).
num_classes = None num_classes = None
# Type of task performed on this dataset (also overwritten) # Type of task performed on this dataset (also overwritten)
dataset_task = '' dataset_task = ""
# Number of CPU threads for the input pipeline # Number of CPU threads for the input pipeline
input_threads = 10 input_threads = 10
@ -66,28 +67,30 @@ class NPM3DConfig(Config):
######################### #########################
# Define layers # Define layers
architecture = ['simple', architecture = [
'resnetb', "simple",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'nearest_upsample', "resnetb",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary'] "nearest_upsample",
"unary",
]
################### ###################
# KPConv parameters # KPConv parameters
@ -112,10 +115,10 @@ class NPM3DConfig(Config):
KP_extent = 1.2 KP_extent = 1.2
# Behavior of convolutions in ('constant', 'linear', 'gaussian') # Behavior of convolutions in ('constant', 'linear', 'gaussian')
KP_influence = 'linear' KP_influence = "linear"
# Aggregation function of KPConv in ('closest', 'sum') # Aggregation function of KPConv in ('closest', 'sum')
aggregation_mode = 'sum' aggregation_mode = "sum"
# Choice of input features # Choice of input features
first_features_dim = 128 first_features_dim = 128
@ -131,10 +134,10 @@ class NPM3DConfig(Config):
# Deformable offset loss # Deformable offset loss
# 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2point' fitting geometry by penalizing distance from deform point to input points
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
deform_fitting_mode = 'point2point' deform_fitting_mode = "point2point"
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
##################### #####################
# Training parameters # Training parameters
@ -164,7 +167,7 @@ class NPM3DConfig(Config):
# Augmentations # Augmentations
augment_scale_anisotropic = True augment_scale_anisotropic = True
augment_symmetries = [True, False, False] augment_symmetries = [True, False, False]
augment_rotation = 'vertical' augment_rotation = "vertical"
augment_scale_min = 0.9 augment_scale_min = 0.9
augment_scale_max = 1.1 augment_scale_max = 1.1
augment_noise = 0.001 augment_noise = 0.001
@ -174,7 +177,7 @@ class NPM3DConfig(Config):
# > 'none': Each point in the whole batch has the same contribution. # > 'none': Each point in the whole batch has the same contribution.
# > 'class': Each class has the same contribution (points are weighted according to class balance) # > 'class': Each class has the same contribution (points are weighted according to class balance)
# > 'batch': Each cloud in the batch has the same contribution (points are weighted according to cloud sizes) # > 'batch': Each cloud in the batch has the same contribution (points are weighted according to cloud sizes)
segloss_balance = 'none' segloss_balance = "none"
# Do we need to save convergence # Do we need to save convergence
saving = True saving = True
@ -187,17 +190,16 @@ class NPM3DConfig(Config):
# \***************/ # \***************/
# #
if __name__ == '__main__': if __name__ == "__main__":
############################ ############################
# Initialize the environment # Initialize the environment
############################ ############################
# Set which gpu is going to be used # Set which gpu is going to be used
GPU_ID = '0' GPU_ID = "0"
# Set GPU visible device # Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
############### ###############
# Previous chkp # Previous chkp
@ -205,22 +207,23 @@ if __name__ == '__main__':
# Choose here if you want to start training from a previous snapshot (None for new training) # Choose here if you want to start training from a previous snapshot (None for new training)
# previous_training_path = 'Log_2020-03-19_19-53-27' # previous_training_path = 'Log_2020-03-19_19-53-27'
previous_training_path = '' previous_training_path = ""
# Choose index of checkpoint to start from. If None, uses the latest chkp # Choose index of checkpoint to start from. If None, uses the latest chkp
chkp_idx = None chkp_idx = None
if previous_training_path: if previous_training_path:
# Find all snapshot in the chosen training folder # Find all snapshot in the chosen training folder
chkp_path = os.path.join('results', previous_training_path, 'checkpoints') chkp_path = os.path.join("results", previous_training_path, "checkpoints")
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
# Find which snapshot to restore # Find which snapshot to restore
if chkp_idx is None: if chkp_idx is None:
chosen_chkp = 'current_chkp.tar' chosen_chkp = "current_chkp.tar"
else: else:
chosen_chkp = np.sort(chkps)[chkp_idx] chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp) chosen_chkp = os.path.join(
"results", previous_training_path, "checkpoints", chosen_chkp
)
else: else:
chosen_chkp = None chosen_chkp = None
@ -230,13 +233,13 @@ if __name__ == '__main__':
############## ##############
print() print()
print('Data Preparation') print("Data Preparation")
print('****************') print("****************")
# Initialize configuration class # Initialize configuration class
config = NPM3DConfig() config = NPM3DConfig()
if previous_training_path: if previous_training_path:
config.load(os.path.join('results', previous_training_path)) config.load(os.path.join("results", previous_training_path))
config.saving_path = None config.saving_path = None
# Get path from argument if given # Get path from argument if given
@ -244,26 +247,30 @@ if __name__ == '__main__':
config.saving_path = sys.argv[1] config.saving_path = sys.argv[1]
# Initialize datasets # Initialize datasets
training_dataset = NPM3DDataset(config, set='training', use_potentials=True) training_dataset = NPM3DDataset(config, set="training", use_potentials=True)
test_dataset = NPM3DDataset(config, set='validation', use_potentials=True) test_dataset = NPM3DDataset(config, set="validation", use_potentials=True)
# Initialize samplers # Initialize samplers
training_sampler = NPM3DSampler(training_dataset) training_sampler = NPM3DSampler(training_dataset)
test_sampler = NPM3DSampler(test_dataset) test_sampler = NPM3DSampler(test_dataset)
# Initialize the dataloader # Initialize the dataloader
training_loader = DataLoader(training_dataset, training_loader = DataLoader(
batch_size=1, training_dataset,
sampler=training_sampler, batch_size=1,
collate_fn=NPM3DCollate, sampler=training_sampler,
num_workers=config.input_threads, collate_fn=NPM3DCollate,
pin_memory=True) num_workers=config.input_threads,
test_loader = DataLoader(test_dataset, pin_memory=True,
batch_size=1, )
sampler=test_sampler, test_loader = DataLoader(
collate_fn=NPM3DCollate, test_dataset,
num_workers=config.input_threads, batch_size=1,
pin_memory=True) sampler=test_sampler,
collate_fn=NPM3DCollate,
num_workers=config.input_threads,
pin_memory=True,
)
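Note that batch_size=1 is not a typo: the custom sampler and NPM3DCollate appear to assemble variable-size batches of stacked point clouds themselves, so PyTorch's built-in batching is effectively bypassed. A minimal sketch of that pattern (hypothetical helper, not the actual NPM3DCollate):

import numpy as np
import torch

def naive_point_collate(batch):
    # 'batch' holds a single element: a list of (points, labels) clouds built by the sampler
    clouds = batch[0]
    points = torch.from_numpy(np.concatenate([p for p, _ in clouds], axis=0))
    labels = torch.from_numpy(np.concatenate([l for _, l in clouds], axis=0))
    lengths = torch.tensor([p.shape[0] for p, _ in clouds])  # to split the stacked batch back
    return points, labels, lengths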
# Calibrate samplers # Calibrate samplers
training_sampler.calibration(training_loader, verbose=True) training_sampler.calibration(training_loader, verbose=True)
@ -274,8 +281,8 @@ if __name__ == '__main__':
# debug_timing(test_dataset, test_loader) # debug_timing(test_dataset, test_loader)
# debug_upsampling(training_dataset, training_loader) # debug_upsampling(training_dataset, training_loader)
print('\nModel Preparation') print("\nModel Preparation")
print('*****************') print("*****************")
# Define network model # Define network model
t1 = time.time() t1 = time.time()
@ -283,25 +290,28 @@ if __name__ == '__main__':
debug = False debug = False
if debug: if debug:
print('\n*************************************\n') print("\n*************************************\n")
print(net) print(net)
print('\n*************************************\n') print("\n*************************************\n")
for param in net.parameters(): for param in net.parameters():
if param.requires_grad: if param.requires_grad:
print(param.shape) print(param.shape)
print('\n*************************************\n') print("\n*************************************\n")
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad)) print(
print('\n*************************************\n') "Model size %i"
% sum(param.numel() for param in net.parameters() if param.requires_grad)
)
print("\n*************************************\n")
# Define a trainer class # Define a trainer class
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp) trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
print('Done in {:.1f}s\n'.format(time.time() - t1)) print("Done in {:.1f}s\n".format(time.time() - t1))
print('\nStart training') print("\nStart training")
print('**************') print("**************")
# Training # Training
trainer.train(net, training_loader, test_loader, config) trainer.train(net, training_loader, test_loader, config)
print('Forcing exit now') print("Forcing exit now")
os.kill(os.getpid(), signal.SIGINT) os.kill(os.getpid(), signal.SIGINT)

View file

@ -40,6 +40,7 @@ from models.architectures import KPFCNN
# \******************/ # \******************/
# #
class S3DISConfig(Config): class S3DISConfig(Config):
""" """
Override the parameters you want to modify for this dataset Override the parameters you want to modify for this dataset
@ -50,13 +51,13 @@ class S3DISConfig(Config):
#################### ####################
# Dataset name # Dataset name
dataset = 'S3DIS' dataset = "S3DIS"
# Number of classes in the dataset (This value is overwritten by dataset class when initializing the dataset). # Number of classes in the dataset (This value is overwritten by dataset class when initializing the dataset).
num_classes = None num_classes = None
# Type of task performed on this dataset (also overwritten) # Type of task performed on this dataset (also overwritten)
dataset_task = '' dataset_task = ""
# Number of CPU threads for the input pipeline # Number of CPU threads for the input pipeline
input_threads = 10 input_threads = 10
@ -66,28 +67,30 @@ class S3DISConfig(Config):
######################### #########################
# # Define layers # # Define layers
architecture = ['simple', architecture = [
'resnetb', "simple",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb_deformable', "resnetb_strided",
'resnetb_deformable', "resnetb_deformable",
'resnetb_deformable_strided', "resnetb_deformable",
'resnetb_deformable', "resnetb_deformable_strided",
'resnetb_deformable', "resnetb_deformable",
'nearest_upsample', "resnetb_deformable",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary'] "nearest_upsample",
"unary",
]
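The block names encode the whole encoder/decoder: everything before the first "nearest_upsample" is the encoder, with "_strided" blocks marking a resolution drop and "_deformable" blocks using deformable KPConv, while the alternating "nearest_upsample"/"unary" pairs form the decoder. A quick, purely illustrative way to split the list defined above:

architecture = S3DISConfig.architecture  # the list defined above
first_up = architecture.index("nearest_upsample")
encoder_blocks = architecture[:first_up]
decoder_blocks = architecture[first_up:]
num_downsamples = sum("strided" in b for b in encoder_blocks)
print(len(encoder_blocks), len(decoder_blocks), num_downsamples)  # -> 14 8 4 for the list above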
# Define layers # Define layers
# architecture = ['simple', # architecture = ['simple',
@ -136,10 +139,10 @@ class S3DISConfig(Config):
KP_extent = 1.2 KP_extent = 1.2
# Behavior of convolutions in ('constant', 'linear', 'gaussian') # Behavior of convolutions in ('constant', 'linear', 'gaussian')
KP_influence = 'linear' KP_influence = "linear"
# Aggregation function of KPConv in ('closest', 'sum') # Aggregation function of KPConv in ('closest', 'sum')
aggregation_mode = 'sum' aggregation_mode = "sum"
# Choice of input features # Choice of input features
first_features_dim = 128 first_features_dim = 128
@ -155,10 +158,10 @@ class S3DISConfig(Config):
# Deformable offset loss # Deformable offset loss
# 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2point' fitting geometry by penalizing distance from deform point to input points
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
deform_fitting_mode = 'point2point' deform_fitting_mode = "point2point"
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
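For context, the 'point2point' fitting term and the repulsion term described above are usually combined into a single regularizer on the deformed kernel point positions. The sketch below shows the usual form of such a regularizer under assumed conventions; it is not the repository's exact implementation, and all names are illustrative.

import torch

def deform_regularizer(deformed_kp, neighbor_pts, KP_extent=1.2,
                       repulse_extent=1.2, fitting_power=1.0):
    # deformed_kp: (K, 3) deformed kernel point positions (same scale as the input points)
    # neighbor_pts: (N, 3) input points in the convolution neighborhood
    # Fitting: every deformed kernel point should stay close to some input point
    d_fit = torch.cdist(deformed_kp, neighbor_pts).min(dim=1).values
    fitting = torch.clamp_max(d_fit / KP_extent, 1.0).pow(2).sum()
    # Repulsion: kernel points closer than repulse_extent push each other apart
    d_kp = torch.cdist(deformed_kp, deformed_kp)
    d_kp = d_kp + 1e6 * torch.eye(deformed_kp.shape[0])  # mask self-distances
    repulsive = torch.clamp(1.0 - d_kp / repulse_extent, min=0.0).pow(2).sum()
    # deform_lr_factor is applied elsewhere: it scales the offsets' learning rate, not this loss
    return fitting_power * (fitting + repulsive)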
##################### #####################
# Training parameters # Training parameters
@ -188,7 +191,7 @@ class S3DISConfig(Config):
# Augmentations # Augmentations
augment_scale_anisotropic = True augment_scale_anisotropic = True
augment_symmetries = [True, False, False] augment_symmetries = [True, False, False]
augment_rotation = 'vertical' augment_rotation = "vertical"
augment_scale_min = 0.9 augment_scale_min = 0.9
augment_scale_max = 1.1 augment_scale_max = 1.1
augment_noise = 0.001 augment_noise = 0.001
@ -198,7 +201,7 @@ class S3DISConfig(Config):
# > 'none': Each point in the whole batch has the same contribution. # > 'none': Each point in the whole batch has the same contribution.
# > 'class': Each class has the same contribution (points are weighted according to class balance) # > 'class': Each class has the same contribution (points are weighted according to class balance)
# > 'batch': Each cloud in the batch has the same contribution (points are weighted according to cloud sizes) # > 'batch': Each cloud in the batch has the same contribution (points are weighted according to cloud sizes)
segloss_balance = 'none' segloss_balance = "none"
# Do we need to save convergence # Do we need to save convergence
saving = True saving = True
@ -211,17 +214,16 @@ class S3DISConfig(Config):
# \***************/ # \***************/
# #
if __name__ == '__main__': if __name__ == "__main__":
############################ ############################
# Initialize the environment # Initialize the environment
############################ ############################
# Set which gpu is going to be used # Set which gpu is going to be used
GPU_ID = '0' GPU_ID = "0"
# Set GPU visible device # Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
############### ###############
# Previous chkp # Previous chkp
@ -229,22 +231,23 @@ if __name__ == '__main__':
# Choose here if you want to start training from a previous snapshot (None for new training) # Choose here if you want to start training from a previous snapshot (None for new training)
# previous_training_path = 'Log_2020-03-19_19-53-27' # previous_training_path = 'Log_2020-03-19_19-53-27'
previous_training_path = '' previous_training_path = ""
# Choose index of checkpoint to start from. If None, uses the latest chkp # Choose index of checkpoint to start from. If None, uses the latest chkp
chkp_idx = None chkp_idx = None
if previous_training_path: if previous_training_path:
# Find all snapshots in the chosen training folder # Find all snapshots in the chosen training folder
chkp_path = os.path.join('results', previous_training_path, 'checkpoints') chkp_path = os.path.join("results", previous_training_path, "checkpoints")
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
# Find which snapshot to restore # Find which snapshot to restore
if chkp_idx is None: if chkp_idx is None:
chosen_chkp = 'current_chkp.tar' chosen_chkp = "current_chkp.tar"
else: else:
chosen_chkp = np.sort(chkps)[chkp_idx] chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp) chosen_chkp = os.path.join(
"results", previous_training_path, "checkpoints", chosen_chkp
)
else: else:
chosen_chkp = None chosen_chkp = None
@ -254,13 +257,13 @@ if __name__ == '__main__':
############## ##############
print() print()
print('Data Preparation') print("Data Preparation")
print('****************') print("****************")
# Initialize configuration class # Initialize configuration class
config = S3DISConfig() config = S3DISConfig()
if previous_training_path: if previous_training_path:
config.load(os.path.join('results', previous_training_path)) config.load(os.path.join("results", previous_training_path))
config.saving_path = None config.saving_path = None
# Get path from argument if given # Get path from argument if given
@ -268,26 +271,30 @@ if __name__ == '__main__':
config.saving_path = sys.argv[1] config.saving_path = sys.argv[1]
# Initialize datasets # Initialize datasets
training_dataset = S3DISDataset(config, set='training', use_potentials=True) training_dataset = S3DISDataset(config, set="training", use_potentials=True)
test_dataset = S3DISDataset(config, set='validation', use_potentials=True) test_dataset = S3DISDataset(config, set="validation", use_potentials=True)
# Initialize samplers # Initialize samplers
training_sampler = S3DISSampler(training_dataset) training_sampler = S3DISSampler(training_dataset)
test_sampler = S3DISSampler(test_dataset) test_sampler = S3DISSampler(test_dataset)
# Initialize the dataloader # Initialize the dataloader
training_loader = DataLoader(training_dataset, training_loader = DataLoader(
batch_size=1, training_dataset,
sampler=training_sampler, batch_size=1,
collate_fn=S3DISCollate, sampler=training_sampler,
num_workers=config.input_threads, collate_fn=S3DISCollate,
pin_memory=True) num_workers=config.input_threads,
test_loader = DataLoader(test_dataset, pin_memory=True,
batch_size=1, )
sampler=test_sampler, test_loader = DataLoader(
collate_fn=S3DISCollate, test_dataset,
num_workers=config.input_threads, batch_size=1,
pin_memory=True) sampler=test_sampler,
collate_fn=S3DISCollate,
num_workers=config.input_threads,
pin_memory=True,
)
# Calibrate samplers # Calibrate samplers
training_sampler.calibration(training_loader, verbose=True) training_sampler.calibration(training_loader, verbose=True)
@ -298,8 +305,8 @@ if __name__ == '__main__':
# debug_timing(test_dataset, test_loader) # debug_timing(test_dataset, test_loader)
# debug_upsampling(training_dataset, training_loader) # debug_upsampling(training_dataset, training_loader)
print('\nModel Preparation') print("\nModel Preparation")
print('*****************') print("*****************")
# Define network model # Define network model
t1 = time.time() t1 = time.time()
@ -307,25 +314,28 @@ if __name__ == '__main__':
debug = False debug = False
if debug: if debug:
print('\n*************************************\n') print("\n*************************************\n")
print(net) print(net)
print('\n*************************************\n') print("\n*************************************\n")
for param in net.parameters(): for param in net.parameters():
if param.requires_grad: if param.requires_grad:
print(param.shape) print(param.shape)
print('\n*************************************\n') print("\n*************************************\n")
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad)) print(
print('\n*************************************\n') "Model size %i"
% sum(param.numel() for param in net.parameters() if param.requires_grad)
)
print("\n*************************************\n")
# Define a trainer class # Define a trainer class
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp) trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
print('Done in {:.1f}s\n'.format(time.time() - t1)) print("Done in {:.1f}s\n".format(time.time() - t1))
print('\nStart training') print("\nStart training")
print('**************') print("**************")
# Training # Training
trainer.train(net, training_loader, test_loader, config) trainer.train(net, training_loader, test_loader, config)
print('Forcing exit now') print("Forcing exit now")
os.kill(os.getpid(), signal.SIGINT) os.kill(os.getpid(), signal.SIGINT)

View file

@ -26,7 +26,6 @@ import signal
import os import os
import numpy as np import numpy as np
import sys import sys
import torch
# Dataset # Dataset
from datasetss.SemanticKitti import * from datasetss.SemanticKitti import *
@ -43,6 +42,7 @@ from models.architectures import KPFCNN
# \******************/ # \******************/
# #
class SemanticKittiConfig(Config): class SemanticKittiConfig(Config):
""" """
Override the parameters you want to modify for this dataset Override the parameters you want to modify for this dataset
@ -53,13 +53,13 @@ class SemanticKittiConfig(Config):
#################### ####################
# Dataset name # Dataset name
dataset = 'SemanticKitti' dataset = "SemanticKitti"
# Number of classes in the dataset (This value is overwritten by dataset class when initializing the dataset). # Number of classes in the dataset (This value is overwritten by dataset class when initializing the dataset).
num_classes = None num_classes = None
# Type of task performed on this dataset (also overwritten) # Type of task performed on this dataset (also overwritten)
dataset_task = '' dataset_task = ""
# Number of CPU threads for the input pipeline # Number of CPU threads for the input pipeline
input_threads = 10 input_threads = 10
@ -69,27 +69,29 @@ class SemanticKittiConfig(Config):
######################### #########################
# Define layers # Define layers
architecture = ['simple', architecture = [
'resnetb', "simple",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'resnetb', "resnetb",
'resnetb_strided', "resnetb",
'resnetb', "resnetb_strided",
'nearest_upsample', "resnetb",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary', "nearest_upsample",
'nearest_upsample', "unary",
'unary'] "nearest_upsample",
"unary",
]
################### ###################
# KPConv parameters # KPConv parameters
@ -122,10 +124,10 @@ class SemanticKittiConfig(Config):
KP_extent = 1.2 KP_extent = 1.2
# Behavior of convolutions in ('constant', 'linear', 'gaussian') # Behavior of convolutions in ('constant', 'linear', 'gaussian')
KP_influence = 'linear' KP_influence = "linear"
# Aggregation function of KPConv in ('closest', 'sum') # Aggregation function of KPConv in ('closest', 'sum')
aggregation_mode = 'sum' aggregation_mode = "sum"
# Choice of input features # Choice of input features
first_features_dim = 128 first_features_dim = 128
@ -141,10 +143,10 @@ class SemanticKittiConfig(Config):
# Deformable offset loss # Deformable offset loss
# 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2point' fitting geometry by penalizing distance from deform point to input points
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
deform_fitting_mode = 'point2point' deform_fitting_mode = "point2point"
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
##################### #####################
# Training parameters # Training parameters
@ -171,7 +173,7 @@ class SemanticKittiConfig(Config):
# Augmentations # Augmentations
augment_scale_anisotropic = True augment_scale_anisotropic = True
augment_symmetries = [True, False, False] augment_symmetries = [True, False, False]
augment_rotation = 'vertical' augment_rotation = "vertical"
augment_scale_min = 0.8 augment_scale_min = 0.8
augment_scale_max = 1.2 augment_scale_max = 1.2
augment_noise = 0.001 augment_noise = 0.001
@ -202,17 +204,16 @@ class SemanticKittiConfig(Config):
# \***************/ # \***************/
# #
if __name__ == '__main__': if __name__ == "__main__":
############################ ############################
# Initialize the environment # Initialize the environment
############################ ############################
# Set which gpu is going to be used # Set which gpu is going to be used
GPU_ID = '0' GPU_ID = "0"
# Set GPU visible device # Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
############### ###############
# Previous chkp # Previous chkp
@ -220,22 +221,23 @@ if __name__ == '__main__':
# Choose here if you want to start training from a previous snapshot (None for new training) # Choose here if you want to start training from a previous snapshot (None for new training)
# previous_training_path = 'Log_2020-03-19_19-53-27' # previous_training_path = 'Log_2020-03-19_19-53-27'
previous_training_path = '' previous_training_path = ""
# Choose index of checkpoint to start from. If None, uses the latest chkp # Choose index of checkpoint to start from. If None, uses the latest chkp
chkp_idx = None chkp_idx = None
if previous_training_path: if previous_training_path:
# Find all snapshots in the chosen training folder # Find all snapshots in the chosen training folder
chkp_path = os.path.join('results', previous_training_path, 'checkpoints') chkp_path = os.path.join("results", previous_training_path, "checkpoints")
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
# Find which snapshot to restore # Find which snapshot to restore
if chkp_idx is None: if chkp_idx is None:
chosen_chkp = 'current_chkp.tar' chosen_chkp = "current_chkp.tar"
else: else:
chosen_chkp = np.sort(chkps)[chkp_idx] chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp) chosen_chkp = os.path.join(
"results", previous_training_path, "checkpoints", chosen_chkp
)
else: else:
chosen_chkp = None chosen_chkp = None
@ -245,13 +247,13 @@ if __name__ == '__main__':
############## ##############
print() print()
print('Data Preparation') print("Data Preparation")
print('****************') print("****************")
# Initialize configuration class # Initialize configuration class
config = SemanticKittiConfig() config = SemanticKittiConfig()
if previous_training_path: if previous_training_path:
config.load(os.path.join('results', previous_training_path)) config.load(os.path.join("results", previous_training_path))
config.saving_path = None config.saving_path = None
# Get path from argument if given # Get path from argument if given
@ -259,28 +261,32 @@ if __name__ == '__main__':
config.saving_path = sys.argv[1] config.saving_path = sys.argv[1]
# Initialize datasets # Initialize datasets
training_dataset = SemanticKittiDataset(config, set='training', training_dataset = SemanticKittiDataset(
balance_classes=True) config, set="training", balance_classes=True
test_dataset = SemanticKittiDataset(config, set='validation', )
balance_classes=False) test_dataset = SemanticKittiDataset(config, set="validation", balance_classes=False)
# Initialize samplers # Initialize samplers
training_sampler = SemanticKittiSampler(training_dataset) training_sampler = SemanticKittiSampler(training_dataset)
test_sampler = SemanticKittiSampler(test_dataset) test_sampler = SemanticKittiSampler(test_dataset)
# Initialize the dataloader # Initialize the dataloader
training_loader = DataLoader(training_dataset, training_loader = DataLoader(
batch_size=1, training_dataset,
sampler=training_sampler, batch_size=1,
collate_fn=SemanticKittiCollate, sampler=training_sampler,
num_workers=config.input_threads, collate_fn=SemanticKittiCollate,
pin_memory=True) num_workers=config.input_threads,
test_loader = DataLoader(test_dataset, pin_memory=True,
batch_size=1, )
sampler=test_sampler, test_loader = DataLoader(
collate_fn=SemanticKittiCollate, test_dataset,
num_workers=config.input_threads, batch_size=1,
pin_memory=True) sampler=test_sampler,
collate_fn=SemanticKittiCollate,
num_workers=config.input_threads,
pin_memory=True,
)
# Calibrate max_in_point value # Calibrate max_in_point value
training_sampler.calib_max_in(config, training_loader, verbose=True) training_sampler.calib_max_in(config, training_loader, verbose=True)
@ -294,8 +300,8 @@ if __name__ == '__main__':
# debug_timing(test_dataset, test_loader) # debug_timing(test_dataset, test_loader)
# debug_class_w(training_dataset, training_loader) # debug_class_w(training_dataset, training_loader)
print('\nModel Preparation') print("\nModel Preparation")
print('*****************') print("*****************")
# Define network model # Define network model
t1 = time.time() t1 = time.time()
@ -303,25 +309,28 @@ if __name__ == '__main__':
debug = False debug = False
if debug: if debug:
print('\n*************************************\n') print("\n*************************************\n")
print(net) print(net)
print('\n*************************************\n') print("\n*************************************\n")
for param in net.parameters(): for param in net.parameters():
if param.requires_grad: if param.requires_grad:
print(param.shape) print(param.shape)
print('\n*************************************\n') print("\n*************************************\n")
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad)) print(
print('\n*************************************\n') "Model size %i"
% sum(param.numel() for param in net.parameters() if param.requires_grad)
)
print("\n*************************************\n")
# Define a trainer class # Define a trainer class
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp) trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
print('Done in {:.1f}s\n'.format(time.time() - t1)) print("Done in {:.1f}s\n".format(time.time() - t1))
print('\nStart training') print("\nStart training")
print('**************') print("**************")
# Training # Training
trainer.train(net, training_loader, test_loader, config) trainer.train(net, training_loader, test_loader, config)
print('Forcing exit now') print("Forcing exit now")
os.kill(os.getpid(), signal.SIGINT) os.kill(os.getpid(), signal.SIGINT)

View file

@ -21,14 +21,14 @@ import numpy as np
# Colors for printing # Colors for printing
class bcolors: class bcolors:
HEADER = '\033[95m' HEADER = "\033[95m"
OKBLUE = '\033[94m' OKBLUE = "\033[94m"
OKGREEN = '\033[92m' OKGREEN = "\033[92m"
WARNING = '\033[93m' WARNING = "\033[93m"
FAIL = '\033[91m' FAIL = "\033[91m"
ENDC = '\033[0m' ENDC = "\033[0m"
BOLD = '\033[1m' BOLD = "\033[1m"
UNDERLINE = '\033[4m' UNDERLINE = "\033[4m"
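These are plain ANSI escape sequences; usage is just string concatenation, for example:

print(bcolors.OKGREEN + "Validation done" + bcolors.ENDC)
print(bcolors.WARNING + bcolors.BOLD + "Low accuracy!" + bcolors.ENDC)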
class Config: class Config:
@ -41,10 +41,10 @@ class Config:
################## ##################
# Dataset name # Dataset name
dataset = '' dataset = ""
# Type of network model # Type of network model
dataset_task = '' dataset_task = ""
# Number of classes in the dataset # Number of classes in the dataset
num_classes = 0 num_classes = 0
@ -69,8 +69,8 @@ class Config:
architecture = [] architecture = []
# Decide the mode of equivariance and invariance # Decide the mode of equivariance and invariance
equivar_mode = '' equivar_mode = ""
invar_mode = '' invar_mode = ""
# Dimension of the first feature maps # Dimension of the first feature maps
first_features_dim = 64 first_features_dim = 64
@ -102,14 +102,14 @@ class Config:
KP_extent = 1.0 KP_extent = 1.0
# Influence function when d < KP_extent. ('constant', 'linear', 'gaussian') When d > KP_extent, always zero # Influence function when d < KP_extent. ('constant', 'linear', 'gaussian') When d > KP_extent, always zero
KP_influence = 'linear' KP_influence = "linear"
# Aggregation function of KPConv in ('closest', 'sum') # Aggregation function of KPConv in ('closest', 'sum')
# Decide if you sum all kernel point influences, or if you only take the influence of the closest KP # Decide if you sum all kernel point influences, or if you only take the influence of the closest KP
aggregation_mode = 'sum' aggregation_mode = "sum"
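To make the two options above concrete, here is a small numpy sketch of how influence weights are typically formed from neighbor-to-kernel-point distances and then aggregated. It illustrates the idea only; it is not the KPConv kernel code, and the gaussian width below is an assumption.

import numpy as np

def kp_weights(dists, KP_extent=1.0, influence="linear", aggregation="sum"):
    # dists: (n_neighbors, n_kernel_points) distances between neighbors and kernel points
    if influence == "constant":
        w = (dists < KP_extent).astype(np.float32)
    elif influence == "linear":
        w = np.clip(1.0 - dists / KP_extent, 0.0, None)  # zero beyond KP_extent
    else:  # "gaussian"
        sigma = KP_extent * 0.3  # assumed width
        w = np.exp(-(dists**2) / (2 * sigma**2))
    if aggregation == "closest":
        # Keep only the influence of the closest kernel point for each neighbor
        mask = np.zeros_like(w)
        mask[np.arange(w.shape[0]), np.argmin(dists, axis=1)] = 1.0
        w = w * mask
    return w  # with "sum", all kernel point influences are kept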
# Fixed points in the kernel : 'none', 'center' or 'verticals' # Fixed points in the kernel : 'none', 'center' or 'verticals'
fixed_kernel_points = 'center' fixed_kernel_points = "center"
# Use modulation in deformable convolutions # Use modulation in deformable convolutions
modulated = False modulated = False
@ -141,12 +141,12 @@ class Config:
augment_scale_min = 0.9 augment_scale_min = 0.9
augment_scale_max = 1.1 augment_scale_max = 1.1
augment_symmetries = [False, False, False] augment_symmetries = [False, False, False]
augment_rotation = 'vertical' augment_rotation = "vertical"
augment_noise = 0.005 augment_noise = 0.005
augment_color = 0.7 augment_color = 0.7
# Augment with occlusions (not implemented yet) # Augment with occlusions (not implemented yet)
augment_occlusion = 'none' augment_occlusion = "none"
augment_occlusion_ratio = 0.2 augment_occlusion_ratio = 0.2
augment_occlusion_num = 1 augment_occlusion_num = 1
@ -154,7 +154,7 @@ class Config:
weight_decay = 1e-3 weight_decay = 1e-3
# The way we balance segmentation loss DEPRECATED # The way we balance segmentation loss DEPRECATED
segloss_balance = 'none' segloss_balance = "none"
# Choose weights for class (used in segmentation loss). Empty list for no weights # Choose weights for class (used in segmentation loss). Empty list for no weights
class_w = [] class_w = []
@ -162,10 +162,10 @@ class Config:
# Deformable offset loss # Deformable offset loss
# 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2point' fitting geometry by penalizing distance from deform point to input points
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
deform_fitting_mode = 'point2point' deform_fitting_mode = "point2point"
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
repulse_extent = 1.0 # Distance of repulsion for deformed kernel points repulse_extent = 1.0 # Distance of repulsion for deformed kernel points
# Number of batch # Number of batch
batch_num = 10 batch_num = 10
@ -193,7 +193,16 @@ class Config:
""" """
# Number of layers # Number of layers
self.num_layers = len([block for block in self.architecture if 'pool' in block or 'strided' in block]) + 1 self.num_layers = (
len(
[
block
for block in self.architecture
if "pool" in block or "strided" in block
]
)
+ 1
)
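In other words, num_layers is one more than the number of resolution drops. On a toy architecture list (illustrative values only):

arch = ["simple", "resnetb", "resnetb_strided", "resnetb", "resnetb_strided", "nearest_upsample", "unary"]
num_layers = len([b for b in arch if "pool" in b or "strided" in b]) + 1  # 2 strided blocks -> 3 layers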
################### ###################
# Deform layer list # Deform layer list
@ -206,9 +215,13 @@ class Config:
self.deform_layers = [] self.deform_layers = []
arch = self.architecture arch = self.architecture
for block_i, block in enumerate(arch): for block_i, block in enumerate(arch):
# Get all blocks of the layer # Get all blocks of the layer
if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block): if not (
"pool" in block
or "strided" in block
or "global" in block
or "upsample" in block
):
layer_blocks += [block] layer_blocks += [block]
continue continue
@ -217,50 +230,51 @@ class Config:
deform_layer = False deform_layer = False
if layer_blocks: if layer_blocks:
if np.any(['deformable' in blck for blck in layer_blocks]): if np.any(["deformable" in blck for blck in layer_blocks]):
deform_layer = True deform_layer = True
if 'pool' in block or 'strided' in block: if "pool" in block or "strided" in block:
if 'deformable' in block: if "deformable" in block:
deform_layer = True deform_layer = True
self.deform_layers += [deform_layer] self.deform_layers += [deform_layer]
layer_blocks = [] layer_blocks = []
# Stop when meeting a global pooling or upsampling # Stop when meeting a global pooling or upsampling
if 'global' in block or 'upsample' in block: if "global" in block or "upsample" in block:
break break
def load(self, path): def load(self, path):
filename = join(path, "parameters.txt")
filename = join(path, 'parameters.txt') with open(filename, "r") as f:
with open(filename, 'r') as f:
lines = f.readlines() lines = f.readlines()
# Class variable dictionary # Class variable dictionary
for line in lines: for line in lines:
line_info = line.split() line_info = line.split()
if len(line_info) > 2 and line_info[0] != '#': if len(line_info) > 2 and line_info[0] != "#":
if line_info[2] == "None":
if line_info[2] == 'None':
setattr(self, line_info[0], None) setattr(self, line_info[0], None)
elif line_info[0] == 'lr_decay_epochs': elif line_info[0] == "lr_decay_epochs":
self.lr_decays = {int(b.split(':')[0]): float(b.split(':')[1]) for b in line_info[2:]} self.lr_decays = {
int(b.split(":")[0]): float(b.split(":")[1])
for b in line_info[2:]
}
elif line_info[0] == 'architecture': elif line_info[0] == "architecture":
self.architecture = [b for b in line_info[2:]] self.architecture = [b for b in line_info[2:]]
elif line_info[0] == 'augment_symmetries': elif line_info[0] == "augment_symmetries":
self.augment_symmetries = [bool(int(b)) for b in line_info[2:]] self.augment_symmetries = [bool(int(b)) for b in line_info[2:]]
elif line_info[0] == 'num_classes': elif line_info[0] == "num_classes":
if len(line_info) > 3: if len(line_info) > 3:
self.num_classes = [int(c) for c in line_info[2:]] self.num_classes = [int(c) for c in line_info[2:]]
else: else:
self.num_classes = int(line_info[2]) self.num_classes = int(line_info[2])
elif line_info[0] == 'class_w': elif line_info[0] == "class_w":
self.class_w = [float(w) for w in line_info[2:]] self.class_w = [float(w) for w in line_info[2:]]
elif hasattr(self, line_info[0]): elif hasattr(self, line_info[0]):
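The file parsed here is the flat parameters.txt written by save() below, one "name = value(s)" entry per line with a few special cases. For instance, a line such as "lr_decay_epochs = 100:0.1 200:0.1" (illustrative values) is turned into a dict exactly as in the branch above:

line_info = "lr_decay_epochs = 100:0.1 200:0.1".split()
lr_decays = {int(b.split(":")[0]): float(b.split(":")[1]) for b in line_info[2:]}
print(lr_decays)  # {100: 0.1, 200: 0.1}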
@ -275,108 +289,132 @@ class Config:
self.__init__() self.__init__()
def save(self): def save(self):
with open(join(self.saving_path, "parameters.txt"), "w") as text_file:
with open(join(self.saving_path, 'parameters.txt'), "w") as text_file: text_file.write("# -----------------------------------#\n")
text_file.write("# Parameters of the training session #\n")
text_file.write('# -----------------------------------#\n') text_file.write("# -----------------------------------#\n\n")
text_file.write('# Parameters of the training session #\n')
text_file.write('# -----------------------------------#\n\n')
# Input parameters # Input parameters
text_file.write('# Input parameters\n') text_file.write("# Input parameters\n")
text_file.write('# ****************\n\n') text_file.write("# ****************\n\n")
text_file.write('dataset = {:s}\n'.format(self.dataset)) text_file.write("dataset = {:s}\n".format(self.dataset))
text_file.write('dataset_task = {:s}\n'.format(self.dataset_task)) text_file.write("dataset_task = {:s}\n".format(self.dataset_task))
if type(self.num_classes) is list: if type(self.num_classes) is list:
text_file.write('num_classes =') text_file.write("num_classes =")
for n in self.num_classes: for n in self.num_classes:
text_file.write(' {:d}'.format(n)) text_file.write(" {:d}".format(n))
text_file.write('\n') text_file.write("\n")
else: else:
text_file.write('num_classes = {:d}\n'.format(self.num_classes)) text_file.write("num_classes = {:d}\n".format(self.num_classes))
text_file.write('in_points_dim = {:d}\n'.format(self.in_points_dim)) text_file.write("in_points_dim = {:d}\n".format(self.in_points_dim))
text_file.write('in_features_dim = {:d}\n'.format(self.in_features_dim)) text_file.write("in_features_dim = {:d}\n".format(self.in_features_dim))
text_file.write('in_radius = {:.6f}\n'.format(self.in_radius)) text_file.write("in_radius = {:.6f}\n".format(self.in_radius))
text_file.write('input_threads = {:d}\n\n'.format(self.input_threads)) text_file.write("input_threads = {:d}\n\n".format(self.input_threads))
# Model parameters # Model parameters
text_file.write('# Model parameters\n') text_file.write("# Model parameters\n")
text_file.write('# ****************\n\n') text_file.write("# ****************\n\n")
text_file.write('architecture =') text_file.write("architecture =")
for a in self.architecture: for a in self.architecture:
text_file.write(' {:s}'.format(a)) text_file.write(" {:s}".format(a))
text_file.write('\n') text_file.write("\n")
text_file.write('equivar_mode = {:s}\n'.format(self.equivar_mode)) text_file.write("equivar_mode = {:s}\n".format(self.equivar_mode))
text_file.write('invar_mode = {:s}\n'.format(self.invar_mode)) text_file.write("invar_mode = {:s}\n".format(self.invar_mode))
text_file.write('num_layers = {:d}\n'.format(self.num_layers)) text_file.write("num_layers = {:d}\n".format(self.num_layers))
text_file.write('first_features_dim = {:d}\n'.format(self.first_features_dim)) text_file.write(
text_file.write('use_batch_norm = {:d}\n'.format(int(self.use_batch_norm))) "first_features_dim = {:d}\n".format(self.first_features_dim)
text_file.write('batch_norm_momentum = {:.6f}\n\n'.format(self.batch_norm_momentum)) )
text_file.write('segmentation_ratio = {:.6f}\n\n'.format(self.segmentation_ratio)) text_file.write("use_batch_norm = {:d}\n".format(int(self.use_batch_norm)))
text_file.write(
"batch_norm_momentum = {:.6f}\n\n".format(self.batch_norm_momentum)
)
text_file.write(
"segmentation_ratio = {:.6f}\n\n".format(self.segmentation_ratio)
)
# KPConv parameters # KPConv parameters
text_file.write('# KPConv parameters\n') text_file.write("# KPConv parameters\n")
text_file.write('# *****************\n\n') text_file.write("# *****************\n\n")
text_file.write('first_subsampling_dl = {:.6f}\n'.format(self.first_subsampling_dl)) text_file.write(
text_file.write('num_kernel_points = {:d}\n'.format(self.num_kernel_points)) "first_subsampling_dl = {:.6f}\n".format(self.first_subsampling_dl)
text_file.write('conv_radius = {:.6f}\n'.format(self.conv_radius)) )
text_file.write('deform_radius = {:.6f}\n'.format(self.deform_radius)) text_file.write("num_kernel_points = {:d}\n".format(self.num_kernel_points))
text_file.write('fixed_kernel_points = {:s}\n'.format(self.fixed_kernel_points)) text_file.write("conv_radius = {:.6f}\n".format(self.conv_radius))
text_file.write('KP_extent = {:.6f}\n'.format(self.KP_extent)) text_file.write("deform_radius = {:.6f}\n".format(self.deform_radius))
text_file.write('KP_influence = {:s}\n'.format(self.KP_influence)) text_file.write(
text_file.write('aggregation_mode = {:s}\n'.format(self.aggregation_mode)) "fixed_kernel_points = {:s}\n".format(self.fixed_kernel_points)
text_file.write('modulated = {:d}\n'.format(int(self.modulated))) )
text_file.write('n_frames = {:d}\n'.format(self.n_frames)) text_file.write("KP_extent = {:.6f}\n".format(self.KP_extent))
text_file.write('max_in_points = {:d}\n\n'.format(self.max_in_points)) text_file.write("KP_influence = {:s}\n".format(self.KP_influence))
text_file.write('max_val_points = {:d}\n\n'.format(self.max_val_points)) text_file.write("aggregation_mode = {:s}\n".format(self.aggregation_mode))
text_file.write('val_radius = {:.6f}\n\n'.format(self.val_radius)) text_file.write("modulated = {:d}\n".format(int(self.modulated)))
text_file.write("n_frames = {:d}\n".format(self.n_frames))
text_file.write("max_in_points = {:d}\n\n".format(self.max_in_points))
text_file.write("max_val_points = {:d}\n\n".format(self.max_val_points))
text_file.write("val_radius = {:.6f}\n\n".format(self.val_radius))
# Training parameters # Training parameters
text_file.write('# Training parameters\n') text_file.write("# Training parameters\n")
text_file.write('# *******************\n\n') text_file.write("# *******************\n\n")
text_file.write('learning_rate = {:f}\n'.format(self.learning_rate)) text_file.write("learning_rate = {:f}\n".format(self.learning_rate))
text_file.write('momentum = {:f}\n'.format(self.momentum)) text_file.write("momentum = {:f}\n".format(self.momentum))
text_file.write('lr_decay_epochs =') text_file.write("lr_decay_epochs =")
for e, d in self.lr_decays.items(): for e, d in self.lr_decays.items():
text_file.write(' {:d}:{:f}'.format(e, d)) text_file.write(" {:d}:{:f}".format(e, d))
text_file.write('\n') text_file.write("\n")
text_file.write('grad_clip_norm = {:f}\n\n'.format(self.grad_clip_norm)) text_file.write("grad_clip_norm = {:f}\n\n".format(self.grad_clip_norm))
text_file.write("augment_symmetries =")
text_file.write('augment_symmetries =')
for a in self.augment_symmetries: for a in self.augment_symmetries:
text_file.write(' {:d}'.format(int(a))) text_file.write(" {:d}".format(int(a)))
text_file.write('\n') text_file.write("\n")
text_file.write('augment_rotation = {:s}\n'.format(self.augment_rotation)) text_file.write("augment_rotation = {:s}\n".format(self.augment_rotation))
text_file.write('augment_noise = {:f}\n'.format(self.augment_noise)) text_file.write("augment_noise = {:f}\n".format(self.augment_noise))
text_file.write('augment_occlusion = {:s}\n'.format(self.augment_occlusion)) text_file.write("augment_occlusion = {:s}\n".format(self.augment_occlusion))
text_file.write('augment_occlusion_ratio = {:.6f}\n'.format(self.augment_occlusion_ratio)) text_file.write(
text_file.write('augment_occlusion_num = {:d}\n'.format(self.augment_occlusion_num)) "augment_occlusion_ratio = {:.6f}\n".format(
text_file.write('augment_scale_anisotropic = {:d}\n'.format(int(self.augment_scale_anisotropic))) self.augment_occlusion_ratio
text_file.write('augment_scale_min = {:.6f}\n'.format(self.augment_scale_min)) )
text_file.write('augment_scale_max = {:.6f}\n'.format(self.augment_scale_max)) )
text_file.write('augment_color = {:.6f}\n\n'.format(self.augment_color)) text_file.write(
"augment_occlusion_num = {:d}\n".format(self.augment_occlusion_num)
)
text_file.write(
"augment_scale_anisotropic = {:d}\n".format(
int(self.augment_scale_anisotropic)
)
)
text_file.write(
"augment_scale_min = {:.6f}\n".format(self.augment_scale_min)
)
text_file.write(
"augment_scale_max = {:.6f}\n".format(self.augment_scale_max)
)
text_file.write("augment_color = {:.6f}\n\n".format(self.augment_color))
text_file.write('weight_decay = {:f}\n'.format(self.weight_decay)) text_file.write("weight_decay = {:f}\n".format(self.weight_decay))
text_file.write('segloss_balance = {:s}\n'.format(self.segloss_balance)) text_file.write("segloss_balance = {:s}\n".format(self.segloss_balance))
text_file.write('class_w =') text_file.write("class_w =")
for a in self.class_w: for a in self.class_w:
text_file.write(' {:.6f}'.format(a)) text_file.write(" {:.6f}".format(a))
text_file.write('\n') text_file.write("\n")
text_file.write('deform_fitting_mode = {:s}\n'.format(self.deform_fitting_mode)) text_file.write(
text_file.write('deform_fitting_power = {:.6f}\n'.format(self.deform_fitting_power)) "deform_fitting_mode = {:s}\n".format(self.deform_fitting_mode)
text_file.write('deform_lr_factor = {:.6f}\n'.format(self.deform_lr_factor)) )
text_file.write('repulse_extent = {:.6f}\n'.format(self.repulse_extent)) text_file.write(
text_file.write('batch_num = {:d}\n'.format(self.batch_num)) "deform_fitting_power = {:.6f}\n".format(self.deform_fitting_power)
text_file.write('val_batch_num = {:d}\n'.format(self.val_batch_num)) )
text_file.write('max_epoch = {:d}\n'.format(self.max_epoch)) text_file.write("deform_lr_factor = {:.6f}\n".format(self.deform_lr_factor))
text_file.write("repulse_extent = {:.6f}\n".format(self.repulse_extent))
text_file.write("batch_num = {:d}\n".format(self.batch_num))
text_file.write("val_batch_num = {:d}\n".format(self.val_batch_num))
text_file.write("max_epoch = {:d}\n".format(self.max_epoch))
if self.epoch_steps is None: if self.epoch_steps is None:
text_file.write('epoch_steps = None\n') text_file.write("epoch_steps = None\n")
else: else:
text_file.write('epoch_steps = {:d}\n'.format(self.epoch_steps)) text_file.write("epoch_steps = {:d}\n".format(self.epoch_steps))
text_file.write('validation_size = {:d}\n'.format(self.validation_size)) text_file.write("validation_size = {:d}\n".format(self.validation_size))
text_file.write('checkpoint_gap = {:d}\n'.format(self.checkpoint_gap)) text_file.write("checkpoint_gap = {:d}\n".format(self.checkpoint_gap))

View file

@ -23,20 +23,12 @@
# Basic libs # Basic libs
import torch
import numpy as np import numpy as np
from sklearn.neighbors import KDTree
from os import makedirs, remove, rename, listdir
from os.path import exists, join
import time
import sys
# PLY reader # PLY reader
from utils.ply import write_ply, read_ply
# Configuration class # Configuration class
from utils.config import Config
def show_ModelNet_models(all_points): def show_ModelNet_models(all_points):
@ -47,7 +39,7 @@ def show_ModelNet_models(all_points):
########################### ###########################
# Create figure for features # Create figure for features
fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800)) fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800))
fig1.scene.parallel_projection = False fig1.scene.parallel_projection = False
# Indices # Indices
@ -55,7 +47,6 @@ def show_ModelNet_models(all_points):
file_i = 0 file_i = 0
def update_scene(): def update_scene():
# clear figure # clear figure
mlab.clf(fig1) mlab.clf(fig1)
@ -66,17 +57,19 @@ def show_ModelNet_models(all_points):
points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
# Show point clouds colorized with activations # Show point clouds colorized with activations
activations = mlab.points3d(points[:, 0], mlab.points3d(
points[:, 1], points[:, 0],
points[:, 2], points[:, 1],
points[:, 2], points[:, 2],
scale_factor=3.0, points[:, 2],
scale_mode='none', scale_factor=3.0,
figure=fig1) scale_mode="none",
figure=fig1,
)
# New title # New title
mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01) mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->"
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
mlab.orientation_axes() mlab.orientation_axes()
@ -85,13 +78,11 @@ def show_ModelNet_models(all_points):
def keyboard_callback(vtk_obj, event): def keyboard_callback(vtk_obj, event):
global file_i global file_i
if vtk_obj.GetKeyCode() in ['g', 'G']: if vtk_obj.GetKeyCode() in ["g", "G"]:
file_i = (file_i - 1) % len(all_points) file_i = (file_i - 1) % len(all_points)
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['h', 'H']: elif vtk_obj.GetKeyCode() in ["h", "H"]:
file_i = (file_i + 1) % len(all_points) file_i = (file_i + 1) % len(all_points)
update_scene() update_scene()
@ -99,7 +90,7 @@ def show_ModelNet_models(all_points):
# Draw a first plot # Draw a first plot
update_scene() update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
mlab.show() mlab.show()
@ -111,7 +102,7 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
########################### ###########################
# Create figure for features # Create figure for features
fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800)) fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800))
fig1.scene.parallel_projection = False fig1.scene.parallel_projection = False
if cloud_labels is None: if cloud_labels is None:
@ -123,7 +114,6 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
show_normals = True show_normals = True
def update_scene(): def update_scene():
# clear figure # clear figure
mlab.clf(fig1) mlab.clf(fig1)
@ -139,27 +129,31 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
# Show point clouds colorized with activations # Show point clouds colorized with activations
activations = mlab.points3d(points[:, 0], mlab.points3d(
points[:, 1], points[:, 0],
points[:, 2], points[:, 1],
labels, points[:, 2],
scale_factor=3.0, labels,
scale_mode='none', scale_factor=3.0,
figure=fig1) scale_mode="none",
figure=fig1,
)
if normals is not None and show_normals: if normals is not None and show_normals:
activations = mlab.quiver3d(points[:, 0], mlab.quiver3d(
points[:, 1], points[:, 0],
points[:, 2], points[:, 1],
normals[:, 0], points[:, 2],
normals[:, 1], normals[:, 0],
normals[:, 2], normals[:, 1],
scale_factor=10.0, normals[:, 2],
scale_mode='none', scale_factor=10.0,
figure=fig1) scale_mode="none",
figure=fig1,
)
# New title # New title
mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01) mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->"
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
mlab.orientation_axes() mlab.orientation_axes()
@ -168,15 +162,15 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
def keyboard_callback(vtk_obj, event): def keyboard_callback(vtk_obj, event):
global file_i, show_normals global file_i, show_normals
if vtk_obj.GetKeyCode() in ['g', 'G']: if vtk_obj.GetKeyCode() in ["g", "G"]:
file_i = (file_i - 1) % len(clouds) file_i = (file_i - 1) % len(clouds)
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['h', 'H']: elif vtk_obj.GetKeyCode() in ["h", "H"]:
file_i = (file_i + 1) % len(clouds) file_i = (file_i + 1) % len(clouds)
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['n', 'N']: elif vtk_obj.GetKeyCode() in ["n", "N"]:
show_normals = not show_normals show_normals = not show_normals
update_scene() update_scene()
@ -184,7 +178,7 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
# Draw a first plot # Draw a first plot
update_scene() update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
mlab.show() mlab.show()
@ -196,7 +190,7 @@ def show_neighbors(query, supports, neighbors):
########################### ###########################
# Create figure for features # Create figure for features
fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800)) fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800))
fig1.scene.parallel_projection = False fig1.scene.parallel_projection = False
# Indices # Indices
@ -204,7 +198,6 @@ def show_neighbors(query, supports, neighbors):
file_i = 0 file_i = 0
def update_scene(): def update_scene():
# clear figure # clear figure
mlab.clf(fig1) mlab.clf(fig1)
@ -212,36 +205,40 @@ def show_neighbors(query, supports, neighbors):
p1 = (query * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 p1 = (query * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
p2 = (supports * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 p2 = (supports * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
l1 = p1[:, 2]*0 l1 = p1[:, 2] * 0
l1[file_i] = 1 l1[file_i] = 1
l2 = p2[:, 2]*0 + 2 l2 = p2[:, 2] * 0 + 2
l2[neighbors[file_i]] = 3 l2[neighbors[file_i]] = 3
# Show point clouds colorized with activations # Show point clouds colorized with activations
activations = mlab.points3d(p1[:, 0], mlab.points3d(
p1[:, 1], p1[:, 0],
p1[:, 2], p1[:, 1],
l1, p1[:, 2],
scale_factor=2.0, l1,
scale_mode='none', scale_factor=2.0,
vmin=0.0, scale_mode="none",
vmax=3.0, vmin=0.0,
figure=fig1) vmax=3.0,
figure=fig1,
)
activations = mlab.points3d(p2[:, 0], mlab.points3d(
p2[:, 1], p2[:, 0],
p2[:, 2], p2[:, 1],
l2, p2[:, 2],
scale_factor=3.0, l2,
scale_mode='none', scale_factor=3.0,
vmin=0.0, scale_mode="none",
vmax=3.0, vmin=0.0,
figure=fig1) vmax=3.0,
figure=fig1,
)
# New title # New title
mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01) mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->"
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
mlab.orientation_axes() mlab.orientation_axes()
@ -250,13 +247,11 @@ def show_neighbors(query, supports, neighbors):
def keyboard_callback(vtk_obj, event): def keyboard_callback(vtk_obj, event):
global file_i global file_i
if vtk_obj.GetKeyCode() in ['g', 'G']: if vtk_obj.GetKeyCode() in ["g", "G"]:
file_i = (file_i - 1) % len(query) file_i = (file_i - 1) % len(query)
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['h', 'H']: elif vtk_obj.GetKeyCode() in ["h", "H"]:
file_i = (file_i + 1) % len(query) file_i = (file_i + 1) % len(query)
update_scene() update_scene()
@ -264,7 +259,7 @@ def show_neighbors(query, supports, neighbors):
# Draw a first plot # Draw a first plot
update_scene() update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
mlab.show() mlab.show()
@ -276,7 +271,7 @@ def show_input_batch(batch):
########################### ###########################
# Create figure for features # Create figure for features
fig1 = mlab.figure('Input', bgcolor=(1, 1, 1), size=(1000, 800)) fig1 = mlab.figure("Input", bgcolor=(1, 1, 1), size=(1000, 800))
fig1.scene.parallel_projection = False fig1.scene.parallel_projection = False
# Unstack batch # Unstack batch
@ -292,18 +287,20 @@ def show_input_batch(batch):
show_pools = False show_pools = False
def update_scene(): def update_scene():
# clear figure # clear figure
mlab.clf(fig1) mlab.clf(fig1)
# Rescale points for visu # Rescale points for visu
p = (all_points[l_i][b_i] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 p = (all_points[l_i][b_i] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
labels = p[:, 2]*0 labels = p[:, 2] * 0
if show_pools: if show_pools:
p2 = (all_points[l_i+1][b_i][neighb_i:neighb_i+1] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 p2 = (
all_points[l_i + 1][b_i][neighb_i : neighb_i + 1] * 1.5
+ np.array([1.0, 1.0, 1.0])
) * 50.0
p = np.vstack((p, p2)) p = np.vstack((p, p2))
labels = np.hstack((labels, np.ones((1,), dtype=np.int32)*3)) labels = np.hstack((labels, np.ones((1,), dtype=np.int32) * 3))
pool_inds = all_pools[l_i][b_i][neighb_i] pool_inds = all_pools[l_i][b_i][neighb_i]
pool_inds = pool_inds[pool_inds >= 0] pool_inds = pool_inds[pool_inds >= 0]
labels[pool_inds] = 2 labels[pool_inds] = 2
@ -314,16 +311,17 @@ def show_input_batch(batch):
labels[neighb_i] = 3 labels[neighb_i] = 3
# Show point clouds colorized with activations # Show point clouds colorized with activations
mlab.points3d(p[:, 0], mlab.points3d(
p[:, 1], p[:, 0],
p[:, 2], p[:, 1],
labels, p[:, 2],
scale_factor=2.0, labels,
scale_mode='none', scale_factor=2.0,
vmin=0.0, scale_mode="none",
vmax=3.0, vmin=0.0,
figure=fig1) vmax=3.0,
figure=fig1,
)
""" """
mlab.points3d(p[-2:, 0], mlab.points3d(p[-2:, 0],
@ -350,12 +348,16 @@ def show_input_batch(batch):
""" """
# New title # New title
title_str = '<([) b_i={:d} (])> <(,) l_i={:d} (.)> <(N) n_i={:d} (M)>'.format(b_i, l_i, neighb_i) title_str = (
"<([) b_i={:d} (])> <(,) l_i={:d} (.)> <(N) n_i={:d} (M)>".format(
b_i, l_i, neighb_i
)
)
mlab.title(title_str, color=(0, 0, 0), size=0.3, height=0.90) mlab.title(title_str, color=(0, 0, 0), size=0.3, height=0.90)
if show_pools: if show_pools:
text = 'pools (switch with G)' text = "pools (switch with G)"
else: else:
text = 'neighbors (switch with G)' text = "neighbors (switch with G)"
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.3) mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.3)
mlab.orientation_axes() mlab.orientation_axes()
@ -364,17 +366,17 @@ def show_input_batch(batch):
def keyboard_callback(vtk_obj, event): def keyboard_callback(vtk_obj, event):
global b_i, l_i, neighb_i, show_pools global b_i, l_i, neighb_i, show_pools
if vtk_obj.GetKeyCode() in ['[', '{']: if vtk_obj.GetKeyCode() in ["[", "{"]:
b_i = (b_i - 1) % len(all_points[l_i]) b_i = (b_i - 1) % len(all_points[l_i])
neighb_i = 0 neighb_i = 0
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in [']', '}']: elif vtk_obj.GetKeyCode() in ["]", "}"]:
b_i = (b_i + 1) % len(all_points[l_i]) b_i = (b_i + 1) % len(all_points[l_i])
neighb_i = 0 neighb_i = 0
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in [',', '<']: elif vtk_obj.GetKeyCode() in [",", "<"]:
if show_pools: if show_pools:
l_i = (l_i - 1) % (len(all_points) - 1) l_i = (l_i - 1) % (len(all_points) - 1)
else: else:
@ -382,7 +384,7 @@ def show_input_batch(batch):
neighb_i = 0 neighb_i = 0
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['.', '>']: elif vtk_obj.GetKeyCode() in [".", ">"]:
if show_pools: if show_pools:
l_i = (l_i + 1) % (len(all_points) - 1) l_i = (l_i + 1) % (len(all_points) - 1)
else: else:
@ -390,15 +392,15 @@ def show_input_batch(batch):
neighb_i = 0 neighb_i = 0
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['n', 'N']: elif vtk_obj.GetKeyCode() in ["n", "N"]:
neighb_i = (neighb_i - 1) % all_points[l_i][b_i].shape[0] neighb_i = (neighb_i - 1) % all_points[l_i][b_i].shape[0]
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['m', 'M']: elif vtk_obj.GetKeyCode() in ["m", "M"]:
neighb_i = (neighb_i + 1) % all_points[l_i][b_i].shape[0] neighb_i = (neighb_i + 1) % all_points[l_i][b_i].shape[0]
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['g', 'G']: elif vtk_obj.GetKeyCode() in ["g", "G"]:
if l_i < len(all_points) - 1: if l_i < len(all_points) - 1:
show_pools = not show_pools show_pools = not show_pools
neighb_i = 0 neighb_i = 0
@ -408,29 +410,5 @@ def show_input_batch(batch):
# Draw a first plot # Draw a first plot
update_scene() update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
mlab.show() mlab.show()
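
A minimal, self-contained sketch of the interaction pattern used above, assuming mayavi and a VTK backend are available; the data, the figure name and the single "g" binding are purely illustrative:

    import numpy as np
    from mayavi import mlab

    fig = mlab.figure("demo", bgcolor=(1.0, 1.0, 1.0), size=(600, 600))
    points = np.random.rand(100, 3) * 100.0
    labels = np.zeros(100)

    def update_scene():
        # Redraw the whole scene, colorizing points by their current label
        mlab.clf(fig)
        mlab.points3d(points[:, 0], points[:, 1], points[:, 2], labels,
                      scale_factor=2.0, scale_mode="none", vmin=0.0, vmax=3.0, figure=fig)

    def keyboard_callback(vtk_obj, event):
        # GetKeyCode() returns the pressed character, exactly as in the loop above
        if vtk_obj.GetKeyCode() in ["g", "G"]:
            labels[:] = (labels + 1) % 4
            update_scene()

    update_scene()
    fig.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
    mlab.show()
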


@ -32,6 +32,7 @@ import numpy as np
# \***************/ # \***************/
# #
def fast_confusion(true, pred, label_values=None): def fast_confusion(true, pred, label_values=None):
""" """
Fast confusion matrix (100x faster than Scikit learn). But only works if labels are la Fast confusion matrix (100x faster than Scikit learn). But only works if labels are la
@ -45,13 +46,25 @@ def fast_confusion(true, pred, label_values=None):
true = np.squeeze(true) true = np.squeeze(true)
pred = np.squeeze(pred) pred = np.squeeze(pred)
if len(true.shape) != 1: if len(true.shape) != 1:
raise ValueError('Truth values are stored in a {:d}D array instead of 1D array'. format(len(true.shape))) raise ValueError(
"Truth values are stored in a {:d}D array instead of 1D array".format(
len(true.shape)
)
)
if len(pred.shape) != 1: if len(pred.shape) != 1:
raise ValueError('Prediction values are stored in a {:d}D array instead of 1D array'. format(len(pred.shape))) raise ValueError(
"Prediction values are stored in a {:d}D array instead of 1D array".format(
len(pred.shape)
)
)
if true.dtype not in [np.int32, np.int64]: if true.dtype not in [np.int32, np.int64]:
raise ValueError('Truth values are {:s} instead of int32 or int64'.format(true.dtype)) raise ValueError(
"Truth values are {:s} instead of int32 or int64".format(true.dtype)
)
if pred.dtype not in [np.int32, np.int64]: if pred.dtype not in [np.int32, np.int64]:
raise ValueError('Prediction values are {:s} instead of int32 or int64'.format(pred.dtype)) raise ValueError(
"Prediction values are {:s} instead of int32 or int64".format(pred.dtype)
)
true = true.astype(np.int32) true = true.astype(np.int32)
pred = pred.astype(np.int32) pred = pred.astype(np.int32)
@ -62,9 +75,13 @@ def fast_confusion(true, pred, label_values=None):
else: else:
# Ensure they are good if given # Ensure they are good if given
if label_values.dtype not in [np.int32, np.int64]: if label_values.dtype not in [np.int32, np.int64]:
raise ValueError('label values are {:s} instead of int32 or int64'.format(label_values.dtype)) raise ValueError(
"label values are {:s} instead of int32 or int64".format(
label_values.dtype
)
)
if len(np.unique(label_values)) < len(label_values): if len(np.unique(label_values)) < len(label_values):
raise ValueError('Given labels are not unique') raise ValueError("Given labels are not unique")
# Sort labels # Sort labels
label_values = np.sort(label_values) label_values = np.sort(label_values)
@ -72,33 +89,32 @@ def fast_confusion(true, pred, label_values=None):
# Get the number of classes # Get the number of classes
num_classes = len(label_values) num_classes = len(label_values)
#print(num_classes) # print(num_classes)
#print(label_values) # print(label_values)
#print(np.max(true)) # print(np.max(true))
#print(np.max(pred)) # print(np.max(pred))
#print(np.max(true * num_classes + pred)) # print(np.max(true * num_classes + pred))
# Start confusion computations # Start confusion computations
if label_values[0] == 0 and label_values[-1] == num_classes - 1: if label_values[0] == 0 and label_values[-1] == num_classes - 1:
# Vectorized confusion # Vectorized confusion
vec_conf = np.bincount(true * num_classes + pred) vec_conf = np.bincount(true * num_classes + pred)
# Add possible missing values due to classes not being in pred or true # Add possible missing values due to classes not being in pred or true
#print(vec_conf.shape) # print(vec_conf.shape)
if vec_conf.shape[0] < num_classes ** 2: if vec_conf.shape[0] < num_classes**2:
vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant') vec_conf = np.pad(
#print(vec_conf.shape) vec_conf, (0, num_classes**2 - vec_conf.shape[0]), "constant"
)
# print(vec_conf.shape)
# Reshape confusion in a matrix # Reshape confusion in a matrix
return vec_conf.reshape((num_classes, num_classes)) return vec_conf.reshape((num_classes, num_classes))
else: else:
# Ensure no negative classes # Ensure no negative classes
if label_values[0] < 0: if label_values[0] < 0:
raise ValueError('Unsupported negative classes') raise ValueError("Unsupported negative classes")
# Get the data in [0,num_classes[ # Get the data in [0,num_classes[
label_map = np.zeros((label_values[-1] + 1,), dtype=np.int32) label_map = np.zeros((label_values[-1] + 1,), dtype=np.int32)
@ -112,12 +128,15 @@ def fast_confusion(true, pred, label_values=None):
vec_conf = np.bincount(true * num_classes + pred) vec_conf = np.bincount(true * num_classes + pred)
# Add possible missing values due to classes not being in pred or true # Add possible missing values due to classes not being in pred or true
if vec_conf.shape[0] < num_classes ** 2: if vec_conf.shape[0] < num_classes**2:
vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant') vec_conf = np.pad(
vec_conf, (0, num_classes**2 - vec_conf.shape[0]), "constant"
)
# Reshape confusion in a matrix # Reshape confusion in a matrix
return vec_conf.reshape((num_classes, num_classes)) return vec_conf.reshape((num_classes, num_classes))
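
The vectorized branch above encodes each (truth, prediction) pair as the single integer true * num_classes + pred and counts the pairs with np.bincount. A minimal sketch of that trick on toy arrays, using bincount's minlength argument in place of the explicit padding:

    import numpy as np

    true = np.array([0, 1, 2, 2, 1], dtype=np.int32)
    pred = np.array([0, 2, 2, 0, 1], dtype=np.int32)
    num_classes = 3

    # Encode each (truth, prediction) pair as one integer and count occurrences
    flat = true * num_classes + pred
    counts = np.bincount(flat, minlength=num_classes**2)
    conf = counts.reshape((num_classes, num_classes))
    # conf[i, j] = number of samples whose truth is i and prediction is j
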
def metrics(confusions, ignore_unclassified=False): def metrics(confusions, ignore_unclassified=False):
""" """
Computes different metrics from confusion matrices. Computes different metrics from confusion matrices.
@ -128,7 +147,7 @@ def metrics(confusions, ignore_unclassified=False):
""" """
# If the first class (often "unclassified") should be ignored, erase it from the confusion. # If the first class (often "unclassified") should be ignored, erase it from the confusion.
if (ignore_unclassified): if ignore_unclassified:
confusions[..., 0, :] = 0 confusions[..., 0, :] = 0
confusions[..., :, 0] = 0 confusions[..., :, 0] = 0
@ -176,7 +195,9 @@ def smooth_metrics(confusions, smooth_n=0, ignore_unclassified=False):
for epoch in range(confusions.shape[-3]): for epoch in range(confusions.shape[-3]):
i0 = max(epoch - smooth_n, 0) i0 = max(epoch - smooth_n, 0)
i1 = min(epoch + smooth_n + 1, confusions.shape[-3]) i1 = min(epoch + smooth_n + 1, confusions.shape[-3])
smoothed_confusions[..., epoch, :, :] = np.sum(confusions[..., i0:i1, :, :], axis=-3) smoothed_confusions[..., epoch, :, :] = np.sum(
confusions[..., i0:i1, :, :], axis=-3
)
# Compute TP, FP, FN. This assumes that the second to last axis counts the truths (like the first axis of a # Compute TP, FP, FN. This assumes that the second to last axis counts the truths (like the first axis of a
# confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix) # confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix)
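
The loop above replaces each epoch's confusion matrix by the sum over a window of neighbouring epochs before metrics are computed. A small sketch of that smoothing, assuming confusions has shape (n_epochs, n_classes, n_classes):

    import numpy as np

    confusions = np.random.randint(0, 10, size=(5, 3, 3))
    smooth_n = 1

    smoothed = np.zeros_like(confusions)
    for epoch in range(confusions.shape[0]):
        i0 = max(epoch - smooth_n, 0)
        i1 = min(epoch + smooth_n + 1, confusions.shape[0])
        # Sum the confusions of the neighbouring epochs before computing metrics
        smoothed[epoch] = np.sum(confusions[i0:i1], axis=0)
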


@ -28,28 +28,29 @@ import sys
# Define PLY types # Define PLY types
ply_dtypes = dict([ ply_dtypes = dict(
(b'int8', 'i1'), [
(b'char', 'i1'), (b"int8", "i1"),
(b'uint8', 'u1'), (b"char", "i1"),
(b'uchar', 'u1'), (b"uint8", "u1"),
(b'int16', 'i2'), (b"uchar", "u1"),
(b'short', 'i2'), (b"int16", "i2"),
(b'uint16', 'u2'), (b"short", "i2"),
(b'ushort', 'u2'), (b"uint16", "u2"),
(b'int32', 'i4'), (b"ushort", "u2"),
(b'int', 'i4'), (b"int32", "i4"),
(b'uint32', 'u4'), (b"int", "i4"),
(b'uint', 'u4'), (b"uint32", "u4"),
(b'float32', 'f4'), (b"uint", "u4"),
(b'float', 'f4'), (b"float32", "f4"),
(b'float64', 'f8'), (b"float", "f4"),
(b'double', 'f8') (b"float64", "f8"),
]) (b"double", "f8"),
]
)
# Numpy reader format # Numpy reader format
valid_formats = {'ascii': '', 'binary_big_endian': '>', valid_formats = {"ascii": "", "binary_big_endian": ">", "binary_little_endian": "<"}
'binary_little_endian': '<'}
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
@ -65,14 +66,14 @@ def parse_header(plyfile, ext):
properties = [] properties = []
num_points = None num_points = None
while b'end_header' not in line and line != b'': while b"end_header" not in line and line != b"":
line = plyfile.readline() line = plyfile.readline()
if b'element' in line: if b"element" in line:
line = line.split() line = line.split()
num_points = int(line[2]) num_points = int(line[2])
elif b'property' in line: elif b"property" in line:
line = line.split() line = line.split()
properties.append((line[2].decode(), ext + ply_dtypes[line[1]])) properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
@ -87,28 +88,27 @@ def parse_mesh_header(plyfile, ext):
num_faces = None num_faces = None
current_element = None current_element = None
while b"end_header" not in line and line != b"":
while b'end_header' not in line and line != b'':
line = plyfile.readline() line = plyfile.readline()
# Find point element # Find point element
if b'element vertex' in line: if b"element vertex" in line:
current_element = 'vertex' current_element = "vertex"
line = line.split() line = line.split()
num_points = int(line[2]) num_points = int(line[2])
elif b'element face' in line: elif b"element face" in line:
current_element = 'face' current_element = "face"
line = line.split() line = line.split()
num_faces = int(line[2]) num_faces = int(line[2])
elif b'property' in line: elif b"property" in line:
if current_element == 'vertex': if current_element == "vertex":
line = line.split() line = line.split()
vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]])) vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
elif current_element == 'vertex': elif current_element == "vertex":
if not line.startswith('property list uchar int'): if not line.startswith("property list uchar int"):
raise ValueError('Unsupported faces property : ' + line) raise ValueError("Unsupported faces property : " + line)
return num_points, num_faces, vertex_properties return num_points, num_faces, vertex_properties
@ -140,7 +140,7 @@ def read_ply(filename, triangular_mesh=False):
>>> data = read_ply('example.ply') >>> data = read_ply('example.ply')
>>> values = data['values'] >>> values = data['values']
array([0, 0, 1, 1, 0]) array([0, 0, 1, 1, 0])
>>> points = np.vstack((data['x'], data['y'], data['z'])).T >>> points = np.vstack((data['x'], data['y'], data['z'])).T
array([[ 0.466 0.595 0.324] array([[ 0.466 0.595 0.324]
[ 0.538 0.407 0.654] [ 0.538 0.407 0.654]
@ -150,24 +150,21 @@ def read_ply(filename, triangular_mesh=False):
""" """
with open(filename, 'rb') as plyfile: with open(filename, "rb") as plyfile:
# Check if the file start with ply # Check if the file start with ply
if b'ply' not in plyfile.readline(): if b"ply" not in plyfile.readline():
raise ValueError('The file does not start with the word ply') raise ValueError("The file does not start with the word ply")
# get binary_little/big or ascii # get binary_little/big or ascii
fmt = plyfile.readline().split()[1].decode() fmt = plyfile.readline().split()[1].decode()
if fmt == "ascii": if fmt == "ascii":
raise ValueError('The file is not binary') raise ValueError("The file is not binary")
# get extension for building the numpy dtypes # get extension for building the numpy dtypes
ext = valid_formats[fmt] ext = valid_formats[fmt]
# PointCloud reader vs mesh reader # PointCloud reader vs mesh reader
if triangular_mesh: if triangular_mesh:
# Parse header # Parse header
num_points, num_faces, properties = parse_mesh_header(plyfile, ext) num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
@ -175,18 +172,19 @@ def read_ply(filename, triangular_mesh=False):
vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points) vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)
# Get face data # Get face data
face_properties = [('k', ext + 'u1'), face_properties = [
('v1', ext + 'i4'), ("k", ext + "u1"),
('v2', ext + 'i4'), ("v1", ext + "i4"),
('v3', ext + 'i4')] ("v2", ext + "i4"),
("v3", ext + "i4"),
]
faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces) faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)
# Return vertex data and concatenated faces # Return vertex data and concatenated faces
faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T faces = np.vstack((faces_data["v1"], faces_data["v2"], faces_data["v3"])).T
data = [vertex_data, faces] data = [vertex_data, faces]
else: else:
# Parse header # Parse header
num_points, properties = parse_header(plyfile, ext) num_points, properties = parse_header(plyfile, ext)
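
The binary payload is read straight into a NumPy structured array whose dtype is built from the parsed header properties. A self-contained sketch of that mapping, with a synthetic in-memory buffer standing in for a real .ply payload (the three-float property list is illustrative):

    import numpy as np

    ext = "<"  # little endian, matching valid_formats["binary_little_endian"]
    properties = [("x", ext + "f4"), ("y", ext + "f4"), ("z", ext + "f4")]

    # Fake binary payload for two points, parsed the same way read_ply parses the file
    raw = np.array([(0.0, 1.0, 2.0), (3.0, 4.0, 5.0)], dtype=properties).tobytes()
    vertex_data = np.frombuffer(raw, dtype=properties, count=2)
    points = np.vstack((vertex_data["x"], vertex_data["y"], vertex_data["z"])).T  # (2, 3)
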
@ -197,18 +195,17 @@ def read_ply(filename, triangular_mesh=False):
def header_properties(field_list, field_names): def header_properties(field_list, field_names):
# List of lines to write # List of lines to write
lines = [] lines = []
# First line describing element vertex # First line describing element vertex
lines.append('element vertex %d' % field_list[0].shape[0]) lines.append("element vertex %d" % field_list[0].shape[0])
# Properties lines # Properties lines
i = 0 i = 0
for fields in field_list: for fields in field_list:
for field in fields.T: for field in fields.T:
lines.append('property %s %s' % (field.dtype.name, field_names[i])) lines.append("property %s %s" % (field.dtype.name, field_names[i]))
i += 1 i += 1
return lines return lines
@ -221,16 +218,16 @@ def write_ply(filename, field_list, field_names, triangular_faces=None):
Parameters Parameters
---------- ----------
filename : string filename : string
the name of the file to which the data is saved. A '.ply' extension will be appended to the the name of the file to which the data is saved. A '.ply' extension will be appended to the
file name if it does not already have one. file name if it does not already have one.
field_list : list, tuple, numpy array field_list : list, tuple, numpy array
the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a
tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered
as one field. as one field.
field_names : list field_names : list
the name of each fields as a list of strings. Has to be the same length as the number of the name of each fields as a list of strings. Has to be the same length as the number of
fields. fields.
Examples Examples
@ -248,57 +245,59 @@ def write_ply(filename, field_list, field_names, triangular_faces=None):
""" """
# Format list input to the right form # Format list input to the right form
field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,)) field_list = (
list(field_list)
if (type(field_list) == list or type(field_list) == tuple)
else list((field_list,))
)
for i, field in enumerate(field_list): for i, field in enumerate(field_list):
if field.ndim < 2: if field.ndim < 2:
field_list[i] = field.reshape(-1, 1) field_list[i] = field.reshape(-1, 1)
if field.ndim > 2: if field.ndim > 2:
print('fields have more than 2 dimensions') print("fields have more than 2 dimensions")
return False return False
# check all fields have the same number of data # check all fields have the same number of data
n_points = [field.shape[0] for field in field_list] n_points = [field.shape[0] for field in field_list]
if not np.all(np.equal(n_points, n_points[0])): if not np.all(np.equal(n_points, n_points[0])):
print('wrong field dimensions') print("wrong field dimensions")
return False return False
# Check if field_names and field_list have the same number of columns # Check if field_names and field_list have the same number of columns
n_fields = np.sum([field.shape[1] for field in field_list]) n_fields = np.sum([field.shape[1] for field in field_list])
if (n_fields != len(field_names)): if n_fields != len(field_names):
print('wrong number of field names') print("wrong number of field names")
return False return False
# Add extension if not there # Add extension if not there
if not filename.endswith('.ply'): if not filename.endswith(".ply"):
filename += '.ply' filename += ".ply"
# open in text mode to write the header # open in text mode to write the header
with open(filename, 'w') as plyfile: with open(filename, "w") as plyfile:
# First magical word # First magical word
header = ['ply'] header = ["ply"]
# Encoding format # Encoding format
header.append('format binary_' + sys.byteorder + '_endian 1.0') header.append("format binary_" + sys.byteorder + "_endian 1.0")
# Points properties description # Points properties description
header.extend(header_properties(field_list, field_names)) header.extend(header_properties(field_list, field_names))
# Add faces if needed # Add faces if needed
if triangular_faces is not None: if triangular_faces is not None:
header.append('element face {:d}'.format(triangular_faces.shape[0])) header.append("element face {:d}".format(triangular_faces.shape[0]))
header.append('property list uchar int vertex_indices') header.append("property list uchar int vertex_indices")
# End of header # End of header
header.append('end_header') header.append("end_header")
# Write all lines # Write all lines
for line in header: for line in header:
plyfile.write("%s\n" % line) plyfile.write("%s\n" % line)
# open in binary/append to use tofile # open in binary/append to use tofile
with open(filename, 'ab') as plyfile: with open(filename, "ab") as plyfile:
# Create a structured array # Create a structured array
i = 0 i = 0
type_list = [] type_list = []
@ -317,19 +316,19 @@ def write_ply(filename, field_list, field_names, triangular_faces=None):
if triangular_faces is not None: if triangular_faces is not None:
triangular_faces = triangular_faces.astype(np.int32) triangular_faces = triangular_faces.astype(np.int32)
type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)] type_list = [("k", "uint8")] + [(str(ind), "int32") for ind in range(3)]
data = np.empty(triangular_faces.shape[0], dtype=type_list) data = np.empty(triangular_faces.shape[0], dtype=type_list)
data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8) data["k"] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)
data['0'] = triangular_faces[:, 0] data["0"] = triangular_faces[:, 0]
data['1'] = triangular_faces[:, 1] data["1"] = triangular_faces[:, 1]
data['2'] = triangular_faces[:, 2] data["2"] = triangular_faces[:, 2]
data.tofile(plyfile) data.tofile(plyfile)
return True return True
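
A short usage sketch for write_ply, with placeholder data and output path; each 1D array and each column of a 2D array becomes one named field of the binary file:

    import numpy as np
    from utils.ply import write_ply

    points = np.random.rand(100, 3).astype(np.float32)
    values = np.random.randint(0, 2, size=100).astype(np.int32)

    # Writes a little/big-endian binary PLY depending on sys.byteorder
    write_ply("example_cloud.ply", [points, values], ["x", "y", "z", "values"])
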
def describe_element(name, df): def describe_element(name, df):
""" Takes the columns of the dataframe and builds a ply-like description """Takes the columns of the dataframe and builds a ply-like description
Parameters Parameters
---------- ----------
@ -340,16 +339,16 @@ def describe_element(name, df):
------- -------
element: list[str] element: list[str]
""" """
property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'} property_formats = {"f": "float", "u": "uchar", "i": "int"}
element = ['element ' + name + ' ' + str(len(df))] element = ["element " + name + " " + str(len(df))]
if name == 'face': if name == "face":
element.append("property list uchar int points_indices") element.append("property list uchar int points_indices")
else: else:
for i in range(len(df.columns)): for i in range(len(df.columns)):
# get first letter of dtype to infer format # get first letter of dtype to infer format
f = property_formats[str(df.dtypes[i])[0]] f = property_formats[str(df.dtypes[i])[0]]
element.append('property ' + f + ' ' + df.columns.values[i]) element.append("property " + f + " " + df.columns.values[i])
return element return element
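
describe_element builds header lines from a pandas DataFrame, inferring the PLY property type from the first letter of each column's dtype. A small sketch, assuming pandas is installed and that this module is importable as utils.ply:

    import pandas as pd
    from utils.ply import describe_element

    df = pd.DataFrame({"x": [0.0, 1.0], "y": [0.0, 1.0], "z": [0.0, 1.0]})
    # ['element vertex 2', 'property float x', 'property float y', 'property float z']
    print(describe_element("vertex", df))
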


@ -24,22 +24,18 @@
# Basic libs # Basic libs
import torch import torch
import torch.nn as nn
import numpy as np import numpy as np
from os import makedirs, listdir from os import makedirs
from os.path import exists, join from os.path import exists, join
import time import time
import json
from sklearn.neighbors import KDTree
# PLY reader # PLY reader
from utils.ply import read_ply, write_ply from utils.ply import write_ply
# Metrics # Metrics
from utils.metrics import IoU_from_confusions, fast_confusion from utils.metrics import IoU_from_confusions, fast_confusion
from sklearn.metrics import confusion_matrix
#from utils.visualizer import show_ModelNet_models # from utils.visualizer import show_ModelNet_models
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
# #
@ -49,12 +45,10 @@ from sklearn.metrics import confusion_matrix
class ModelTester: class ModelTester:
# Initialization methods # Initialization methods
# ------------------------------------------------------------------------------------------------------------------ # ------------------------------------------------------------------------------------------------------------------
def __init__(self, net, chkp_path=None, on_gpu=True): def __init__(self, net, chkp_path=None, on_gpu=True):
############ ############
# Parameters # Parameters
############ ############
@ -71,8 +65,8 @@ class ModelTester:
########################## ##########################
checkpoint = torch.load(chkp_path) checkpoint = torch.load(chkp_path)
net.load_state_dict(checkpoint['model_state_dict']) net.load_state_dict(checkpoint["model_state_dict"])
self.epoch = checkpoint['epoch'] self.epoch = checkpoint["epoch"]
net.eval() net.eval()
print("Model and training state restored.") print("Model and training state restored.")
@ -82,7 +76,6 @@ class ModelTester:
# ------------------------------------------------------------------------------------------------------------------ # ------------------------------------------------------------------------------------------------------------------
def classification_test(self, net, test_loader, config, num_votes=100, debug=False): def classification_test(self, net, test_loader, config, num_votes=100, debug=False):
############ ############
# Initialize # Initialize
############ ############
@ -91,7 +84,6 @@ class ModelTester:
softmax = torch.nn.Softmax(1) softmax = torch.nn.Softmax(1)
# Number of classes including ignored labels # Number of classes including ignored labels
nc_tot = test_loader.dataset.num_classes
# Number of classes predicted by the model # Number of classes predicted by the model
nc_model = config.num_classes nc_model = config.num_classes
@ -104,7 +96,6 @@ class ModelTester:
mean_dt = np.zeros(1) mean_dt = np.zeros(1)
last_display = time.time() last_display = time.time()
while np.min(self.test_counts) < num_votes: while np.min(self.test_counts) < num_votes:
# Run model on all test examples # Run model on all test examples
# ****************************** # ******************************
@ -115,12 +106,11 @@ class ModelTester:
# Start validation loop # Start validation loop
for batch in test_loader: for batch in test_loader:
# New time # New time
t = t[-1:] t = t[-1:]
t += [time.time()] t += [time.time()]
if 'cuda' in self.device.type: if "cuda" in self.device.type:
batch.to(self.device) batch.to(self.device)
# Forward pass # Forward pass
@ -131,7 +121,7 @@ class ModelTester:
targets += [batch.labels.cpu().numpy()] targets += [batch.labels.cpu().numpy()]
obj_inds += [batch.model_inds.cpu().numpy()] obj_inds += [batch.model_inds.cpu().numpy()]
if 'cuda' in self.device.type: if "cuda" in self.device.type:
torch.cuda.synchronize(self.device) torch.cuda.synchronize(self.device)
# Average timing # Average timing
@ -141,22 +131,28 @@ class ModelTester:
# Display # Display
if (t[-1] - last_display) > 1.0: if (t[-1] - last_display) > 1.0:
last_display = t[-1] last_display = t[-1]
message = 'Test vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})' message = "Test vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})"
print(message.format(np.min(self.test_counts), print(
100 * len(obj_inds) / config.validation_size, message.format(
1000 * (mean_dt[0]), np.min(self.test_counts),
1000 * (mean_dt[1]))) 100 * len(obj_inds) / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
)
)
# Stack all validation predictions # Stack all validation predictions
probs = np.vstack(probs) probs = np.vstack(probs)
targets = np.hstack(targets) targets = np.hstack(targets)
obj_inds = np.hstack(obj_inds) obj_inds = np.hstack(obj_inds)
if np.any(test_loader.dataset.input_labels[obj_inds] != targets): if np.any(test_loader.dataset.input_labels[obj_inds] != targets):
raise ValueError('wrong object indices') raise ValueError("wrong object indices")
# Compute incremental average (predictions are always ordered) # Compute incremental average (predictions are always ordered)
self.test_counts[obj_inds] += 1 self.test_counts[obj_inds] += 1
self.test_probs[obj_inds] += (probs - self.test_probs[obj_inds]) / (self.test_counts[obj_inds]) self.test_probs[obj_inds] += (probs - self.test_probs[obj_inds]) / (
self.test_counts[obj_inds]
)
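
The vote update above is an incremental mean: after k votes, adding (p - mean) / k to the running value gives the exact average of the k probability vectors. A small numeric check:

    import numpy as np

    votes = [np.array([0.2, 0.8]), np.array([0.6, 0.4]), np.array([0.4, 0.6])]

    count = 0
    running = np.zeros(2)
    for p in votes:
        count += 1
        running += (p - running) / count          # same update as self.test_probs above

    print(np.allclose(running, np.mean(votes, axis=0)))  # True
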
# Save/Display temporary results # Save/Display temporary results
# ****************************** # ******************************
@ -164,16 +160,20 @@ class ModelTester:
test_labels = np.array(test_loader.dataset.label_values) test_labels = np.array(test_loader.dataset.label_values)
# Compute classification results # Compute classification results
C1 = fast_confusion(test_loader.dataset.input_labels, C1 = fast_confusion(
np.argmax(self.test_probs, axis=1), test_loader.dataset.input_labels,
test_labels) np.argmax(self.test_probs, axis=1),
test_labels,
)
ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6) ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)
print('Test Accuracy = {:.1f}%'.format(ACC)) print("Test Accuracy = {:.1f}%".format(ACC))
return return
def cloud_segmentation_test(self, net, test_loader, config, num_votes=100, debug=False): def cloud_segmentation_test(
self, net, test_loader, config, num_votes=100, debug=False
):
""" """
Test method for cloud segmentation models Test method for cloud segmentation models
""" """
@ -188,36 +188,41 @@ class ModelTester:
softmax = torch.nn.Softmax(1) softmax = torch.nn.Softmax(1)
# Number of classes including ignored labels # Number of classes including ignored labels
nc_tot = test_loader.dataset.num_classes
# Number of classes predicted by the model # Number of classes predicted by the model
nc_model = config.num_classes nc_model = config.num_classes
# Initiate global prediction over test clouds # Initiate global prediction over test clouds
self.test_probs = [np.zeros((l.shape[0], nc_model)) for l in test_loader.dataset.input_labels] self.test_probs = [
np.zeros((l.shape[0], nc_model)) for l in test_loader.dataset.input_labels
]
# Test saving path # Test saving path
if config.saving: if config.saving:
test_path = join('test', config.saving_path.split('/')[-1]) test_path = join("test", config.saving_path.split("/")[-1])
if not exists(test_path): if not exists(test_path):
makedirs(test_path) makedirs(test_path)
if not exists(join(test_path, 'predictions')): if not exists(join(test_path, "predictions")):
makedirs(join(test_path, 'predictions')) makedirs(join(test_path, "predictions"))
if not exists(join(test_path, 'probs')): if not exists(join(test_path, "probs")):
makedirs(join(test_path, 'probs')) makedirs(join(test_path, "probs"))
if not exists(join(test_path, 'potentials')): if not exists(join(test_path, "potentials")):
makedirs(join(test_path, 'potentials')) makedirs(join(test_path, "potentials"))
else: else:
test_path = None test_path = None
# If on validation directly compute score # If on validation directly compute score
if test_loader.dataset.set == 'validation': if test_loader.dataset.set == "validation":
val_proportions = np.zeros(nc_model, dtype=np.float32) val_proportions = np.zeros(nc_model, dtype=np.float32)
i = 0 i = 0
for label_value in test_loader.dataset.label_values: for label_value in test_loader.dataset.label_values:
if label_value not in test_loader.dataset.ignored_labels: if label_value not in test_loader.dataset.ignored_labels:
val_proportions[i] = np.sum([np.sum(labels == label_value) val_proportions[i] = np.sum(
for labels in test_loader.dataset.validation_labels]) [
np.sum(labels == label_value)
for labels in test_loader.dataset.validation_labels
]
)
i += 1 i += 1
else: else:
val_proportions = None val_proportions = None
@ -235,17 +240,16 @@ class ModelTester:
# Start test loop # Start test loop
while True: while True:
print('Initialize workers') print("Initialize workers")
for i, batch in enumerate(test_loader): for i, batch in enumerate(test_loader):
# New time # New time
t = t[-1:] t = t[-1:]
t += [time.time()] t += [time.time()]
if i == 0: if i == 0:
print('Done in {:.1f}s'.format(t[1] - t[0])) print("Done in {:.1f}s".format(t[1] - t[0]))
if 'cuda' in self.device.type: if "cuda" in self.device.type:
batch.to(self.device) batch.to(self.device)
# Forward pass # Forward pass
@ -266,20 +270,25 @@ class ModelTester:
i0 = 0 i0 = 0
for b_i, length in enumerate(lengths): for b_i, length in enumerate(lengths):
# Get prediction # Get prediction
points = s_points[i0:i0 + length] points = s_points[i0 : i0 + length]
probs = stacked_probs[i0:i0 + length] probs = stacked_probs[i0 : i0 + length]
inds = in_inds[i0:i0 + length] inds = in_inds[i0 : i0 + length]
c_i = cloud_inds[b_i] c_i = cloud_inds[b_i]
if 0 < test_radius_ratio < 1: if 0 < test_radius_ratio < 1:
mask = np.sum(points ** 2, axis=1) < (test_radius_ratio * config.in_radius) ** 2 mask = (
np.sum(points**2, axis=1)
< (test_radius_ratio * config.in_radius) ** 2
)
inds = inds[mask] inds = inds[mask]
probs = probs[mask] probs = probs[mask]
# Update current probs in whole cloud # Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs self.test_probs[c_i][inds] = (
test_smooth * self.test_probs[c_i][inds]
+ (1 - test_smooth) * probs
)
i0 += length i0 += length
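
Each pass blends the new batch predictions into the stored per-point probabilities with an exponential moving average controlled by test_smooth, optionally discarding points near the border of the input sphere first. A sketch with illustrative values (test_smooth, the radius and the arrays are not taken from the config):

    import numpy as np

    test_smooth, test_radius_ratio, in_radius = 0.95, 0.7, 1.0
    points = np.random.rand(5, 3) - 0.5      # points centered on the input sphere
    new_probs = np.random.rand(5, 3)         # per-point class probabilities of this pass
    stored = np.zeros((5, 3))                # accumulated probabilities for the cloud

    # Keep only points well inside the sphere, then blend old and new probabilities
    mask = np.sum(points**2, axis=1) < (test_radius_ratio * in_radius) ** 2
    stored[mask] = test_smooth * stored[mask] + (1 - test_smooth) * new_probs[mask]
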
# Average timing # Average timing
@ -292,50 +301,69 @@ class ModelTester:
# Display # Display
if (t[-1] - last_display) > 1.0: if (t[-1] - last_display) > 1.0:
last_display = t[-1] last_display = t[-1]
message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})' message = (
print(message.format(test_epoch, i, "e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})"
100 * i / config.validation_size, )
1000 * (mean_dt[0]), print(
1000 * (mean_dt[1]), message.format(
1000 * (mean_dt[2]))) test_epoch,
i,
100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
1000 * (mean_dt[2]),
)
)
# Update minimum of potentials # Update minimum of potentials
new_min = torch.min(test_loader.dataset.min_potentials) new_min = torch.min(test_loader.dataset.min_potentials)
print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min)) print(
#print([np.mean(pots) for pots in test_loader.dataset.potentials]) "Test epoch {:d}, end. Min potential = {:.1f}".format(
test_epoch, new_min
)
)
# print([np.mean(pots) for pots in test_loader.dataset.potentials])
# Save predicted cloud # Save predicted cloud
if last_min + 1 < new_min: if last_min + 1 < new_min:
# Update last_min # Update last_min
last_min += 1 last_min += 1
# Show vote results (on sub-clouds, so these are not the final values) # Show vote results (on sub-clouds, so these are not the final values)
if test_loader.dataset.set == 'validation': if test_loader.dataset.set == "validation":
print('\nConfusion on sub clouds') print("\nConfusion on sub clouds")
Confs = [] Confs = []
for i, file_path in enumerate(test_loader.dataset.files): for i, file_path in enumerate(test_loader.dataset.files):
# Insert false columns for ignored labels # Insert false columns for ignored labels
probs = np.array(self.test_probs[i], copy=True) probs = np.array(self.test_probs[i], copy=True)
for l_ind, label_value in enumerate(test_loader.dataset.label_values): for l_ind, label_value in enumerate(
test_loader.dataset.label_values
):
if label_value in test_loader.dataset.ignored_labels: if label_value in test_loader.dataset.ignored_labels:
probs = np.insert(probs, l_ind, 0, axis=1) probs = np.insert(probs, l_ind, 0, axis=1)
# Predicted labels # Predicted labels
preds = test_loader.dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32) preds = test_loader.dataset.label_values[
np.argmax(probs, axis=1)
].astype(np.int32)
# Targets # Targets
targets = test_loader.dataset.input_labels[i] targets = test_loader.dataset.input_labels[i]
# Confs # Confs
Confs += [fast_confusion(targets, preds, test_loader.dataset.label_values)] Confs += [
fast_confusion(
targets, preds, test_loader.dataset.label_values
)
]
# Regroup confusions # Regroup confusions
C = np.sum(np.stack(Confs), axis=0).astype(np.float32) C = np.sum(np.stack(Confs), axis=0).astype(np.float32)
# Remove ignored labels from confusions # Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))): for l_ind, label_value in reversed(
list(enumerate(test_loader.dataset.label_values))
):
if label_value in test_loader.dataset.ignored_labels: if label_value in test_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0) C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1) C = np.delete(C, l_ind, axis=1)
@ -346,20 +374,18 @@ class ModelTester:
# Compute IoUs # Compute IoUs
IoUs = IoU_from_confusions(C) IoUs = IoU_from_confusions(C)
mIoU = np.mean(IoUs) mIoU = np.mean(IoUs)
s = '{:5.2f} | '.format(100 * mIoU) s = "{:5.2f} | ".format(100 * mIoU)
for IoU in IoUs: for IoU in IoUs:
s += '{:5.2f} '.format(100 * IoU) s += "{:5.2f} ".format(100 * IoU)
print(s + '\n') print(s + "\n")
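
IoU_from_confusions (whose body is not shown in this diff) is expected to implement the standard per-class intersection over union, TP / (TP + FP + FN), from the confusion matrix. A sketch of that computation on a single matrix, with truths on rows and predictions on columns:

    import numpy as np

    C = np.array([[50, 2, 3],
                  [4, 60, 1],
                  [2, 5, 70]], dtype=np.float32)

    TP = np.diag(C)
    FN = np.sum(C, axis=1) - TP        # truths of the class predicted as something else
    FP = np.sum(C, axis=0) - TP        # other truths predicted as this class
    IoUs = TP / (TP + FP + FN + 1e-6)
    mIoU = np.mean(IoUs)
    print("{:5.2f} | ".format(100 * mIoU) + " ".join("{:5.2f}".format(100 * v) for v in IoUs))
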
# Save real IoU once in a while # Save real IoU once in a while
if int(np.ceil(new_min)) % 10 == 0: if int(np.ceil(new_min)) % 10 == 0:
# Project predictions # Project predictions
print('\nReproject Vote #{:d}'.format(int(np.floor(new_min)))) print("\nReproject Vote #{:d}".format(int(np.floor(new_min))))
t1 = time.time() t1 = time.time()
proj_probs = [] proj_probs = []
for i, file_path in enumerate(test_loader.dataset.files): for i, file_path in enumerate(test_loader.dataset.files):
# print(i, file_path, test_loader.dataset.test_proj[i].shape, self.test_probs[i].shape) # print(i, file_path, test_loader.dataset.test_proj[i].shape, self.test_probs[i].shape)
# print(test_loader.dataset.test_proj[i].dtype, np.max(test_loader.dataset.test_proj[i])) # print(test_loader.dataset.test_proj[i].dtype, np.max(test_loader.dataset.test_proj[i]))
@ -370,90 +396,116 @@ class ModelTester:
proj_probs += [probs] proj_probs += [probs]
# Insert false columns for ignored labels # Insert false columns for ignored labels
for l_ind, label_value in enumerate(test_loader.dataset.label_values): for l_ind, label_value in enumerate(
test_loader.dataset.label_values
):
if label_value in test_loader.dataset.ignored_labels: if label_value in test_loader.dataset.ignored_labels:
proj_probs[i] = np.insert(proj_probs[i], l_ind, 0, axis=1) proj_probs[i] = np.insert(
proj_probs[i], l_ind, 0, axis=1
)
t2 = time.time() t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1)) print("Done in {:.1f} s\n".format(t2 - t1))
# Show vote results # Show vote results
if test_loader.dataset.set == 'validation': if test_loader.dataset.set == "validation":
print('Confusion on full clouds') print("Confusion on full clouds")
t1 = time.time() t1 = time.time()
Confs = [] Confs = []
for i, file_path in enumerate(test_loader.dataset.files): for i, file_path in enumerate(test_loader.dataset.files):
# Get the predicted labels # Get the predicted labels
preds = test_loader.dataset.label_values[np.argmax(proj_probs[i], axis=1)].astype(np.int32) preds = test_loader.dataset.label_values[
np.argmax(proj_probs[i], axis=1)
].astype(np.int32)
# Confusion # Confusion
targets = test_loader.dataset.validation_labels[i] targets = test_loader.dataset.validation_labels[i]
Confs += [fast_confusion(targets, preds, test_loader.dataset.label_values)] Confs += [
fast_confusion(
targets, preds, test_loader.dataset.label_values
)
]
t2 = time.time() t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1)) print("Done in {:.1f} s\n".format(t2 - t1))
# Regroup confusions # Regroup confusions
C = np.sum(np.stack(Confs), axis=0) C = np.sum(np.stack(Confs), axis=0)
# Remove ignored labels from confusions # Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))): for l_ind, label_value in reversed(
list(enumerate(test_loader.dataset.label_values))
):
if label_value in test_loader.dataset.ignored_labels: if label_value in test_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0) C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1) C = np.delete(C, l_ind, axis=1)
IoUs = IoU_from_confusions(C) IoUs = IoU_from_confusions(C)
mIoU = np.mean(IoUs) mIoU = np.mean(IoUs)
s = '{:5.2f} | '.format(100 * mIoU) s = "{:5.2f} | ".format(100 * mIoU)
for IoU in IoUs: for IoU in IoUs:
s += '{:5.2f} '.format(100 * IoU) s += "{:5.2f} ".format(100 * IoU)
print('-' * len(s)) print("-" * len(s))
print(s) print(s)
print('-' * len(s) + '\n') print("-" * len(s) + "\n")
# Save predictions # Save predictions
print('Saving clouds') print("Saving clouds")
t1 = time.time() t1 = time.time()
for i, file_path in enumerate(test_loader.dataset.files): for i, file_path in enumerate(test_loader.dataset.files):
# Get file # Get file
points = test_loader.dataset.load_evaluation_points(file_path) points = test_loader.dataset.load_evaluation_points(file_path)
# Get the predicted labels # Get the predicted labels
preds = test_loader.dataset.label_values[np.argmax(proj_probs[i], axis=1)].astype(np.int32) preds = test_loader.dataset.label_values[
np.argmax(proj_probs[i], axis=1)
].astype(np.int32)
# Save plys # Save plys
cloud_name = file_path.split('/')[-1] cloud_name = file_path.split("/")[-1]
test_name = join(test_path, 'predictions', cloud_name) test_name = join(test_path, "predictions", cloud_name)
write_ply(test_name, write_ply(test_name, [points, preds], ["x", "y", "z", "preds"])
[points, preds], test_name2 = join(test_path, "probs", cloud_name)
['x', 'y', 'z', 'preds']) prob_names = [
test_name2 = join(test_path, 'probs', cloud_name) "_".join(test_loader.dataset.label_to_names[label].split())
prob_names = ['_'.join(test_loader.dataset.label_to_names[label].split()) for label in test_loader.dataset.label_values
for label in test_loader.dataset.label_values] ]
write_ply(test_name2, write_ply(
[points, proj_probs[i]], test_name2,
['x', 'y', 'z'] + prob_names) [points, proj_probs[i]],
["x", "y", "z"] + prob_names,
)
# Save potentials # Save potentials
pot_points = np.array(test_loader.dataset.pot_trees[i].data, copy=False) pot_points = np.array(
pot_name = join(test_path, 'potentials', cloud_name) test_loader.dataset.pot_trees[i].data, copy=False
pots = test_loader.dataset.potentials[i].numpy().astype(np.float32) )
write_ply(pot_name, pot_name = join(test_path, "potentials", cloud_name)
[pot_points.astype(np.float32), pots], pots = (
['x', 'y', 'z', 'pots']) test_loader.dataset.potentials[i].numpy().astype(np.float32)
)
write_ply(
pot_name,
[pot_points.astype(np.float32), pots],
["x", "y", "z", "pots"],
)
# Save ascii preds # Save ascii preds
if test_loader.dataset.set == 'test': if test_loader.dataset.set == "test":
if test_loader.dataset.name.startswith('Semantic3D'): if test_loader.dataset.name.startswith("Semantic3D"):
ascii_name = join(test_path, 'predictions', test_loader.dataset.ascii_files[cloud_name]) ascii_name = join(
test_path,
"predictions",
test_loader.dataset.ascii_files[cloud_name],
)
else: else:
ascii_name = join(test_path, 'predictions', cloud_name[:-4] + '.txt') ascii_name = join(
np.savetxt(ascii_name, preds, fmt='%d') test_path, "predictions", cloud_name[:-4] + ".txt"
)
np.savetxt(ascii_name, preds, fmt="%d")
t2 = time.time() t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1)) print("Done in {:.1f} s\n".format(t2 - t1))
test_epoch += 1 test_epoch += 1
@ -463,7 +515,9 @@ class ModelTester:
return return
def slam_segmentation_test(self, net, test_loader, config, num_votes=100, debug=True): def slam_segmentation_test(
self, net, test_loader, config, num_votes=100, debug=True
):
""" """
Test method for slam segmentation models Test method for slam segmentation models
""" """
@ -485,29 +539,31 @@ class ModelTester:
test_path = None test_path = None
report_path = None report_path = None
if config.saving: if config.saving:
test_path = join('test', config.saving_path.split('/')[-1]) test_path = join("test", config.saving_path.split("/")[-1])
if not exists(test_path): if not exists(test_path):
makedirs(test_path) makedirs(test_path)
report_path = join(test_path, 'reports') report_path = join(test_path, "reports")
if not exists(report_path): if not exists(report_path):
makedirs(report_path) makedirs(report_path)
if test_loader.dataset.set == 'validation': if test_loader.dataset.set == "validation":
for folder in ['val_predictions', 'val_probs']: for folder in ["val_predictions", "val_probs"]:
if not exists(join(test_path, folder)): if not exists(join(test_path, folder)):
makedirs(join(test_path, folder)) makedirs(join(test_path, folder))
else: else:
for folder in ['predictions', 'probs']: for folder in ["predictions", "probs"]:
if not exists(join(test_path, folder)): if not exists(join(test_path, folder)):
makedirs(join(test_path, folder)) makedirs(join(test_path, folder))
# Init validation container # Init validation container
all_f_preds = [] all_f_preds = []
all_f_labels = [] all_f_labels = []
if test_loader.dataset.set == 'validation': if test_loader.dataset.set == "validation":
for i, seq_frames in enumerate(test_loader.dataset.frames): for i, seq_frames in enumerate(test_loader.dataset.frames):
all_f_preds.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames]) all_f_preds.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames])
all_f_labels.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames]) all_f_labels.append(
[np.zeros((0,), dtype=np.int32) for _ in seq_frames]
)
##################### #####################
# Network predictions # Network predictions
@ -523,17 +579,16 @@ class ModelTester:
# Start test loop # Start test loop
while True: while True:
print('Initialize workers') print("Initialize workers")
for i, batch in enumerate(test_loader): for i, batch in enumerate(test_loader):
# New time # New time
t = t[-1:] t = t[-1:]
t += [time.time()] t += [time.time()]
if i == 0: if i == 0:
print('Done in {:.1f}s'.format(t[1] - t[0])) print("Done in {:.1f}s".format(t[1] - t[0]))
if 'cuda' in self.device.type: if "cuda" in self.device.type:
batch.to(self.device) batch.to(self.device)
# Forward pass # Forward pass
@ -555,9 +610,8 @@ class ModelTester:
i0 = 0 i0 = 0
for b_i, length in enumerate(lengths): for b_i, length in enumerate(lengths):
# Get prediction # Get prediction
probs = stk_probs[i0:i0 + length] probs = stk_probs[i0 : i0 + length]
proj_inds = r_inds_list[b_i] proj_inds = r_inds_list[b_i]
proj_mask = r_mask_list[b_i] proj_mask = r_mask_list[b_i]
frame_labels = labels_list[b_i] frame_labels = labels_list[b_i]
@ -573,97 +627,151 @@ class ModelTester:
# Save probs in a binary file (uint8 format for lighter weight) # Save probs in a binary file (uint8 format for lighter weight)
seq_name = test_loader.dataset.sequences[s_ind] seq_name = test_loader.dataset.sequences[s_ind]
if test_loader.dataset.set == 'validation': if test_loader.dataset.set == "validation":
folder = 'val_probs' folder = "val_probs"
pred_folder = 'val_predictions' pred_folder = "val_predictions"
else: else:
folder = 'probs' folder = "probs"
pred_folder = 'predictions' pred_folder = "predictions"
filename = '{:s}_{:07d}.npy'.format(seq_name, f_ind) filename = "{:s}_{:07d}.npy".format(seq_name, f_ind)
filepath = join(test_path, folder, filename) filepath = join(test_path, folder, filename)
if exists(filepath): if exists(filepath):
frame_probs_uint8 = np.load(filepath) frame_probs_uint8 = np.load(filepath)
else: else:
frame_probs_uint8 = np.zeros((proj_mask.shape[0], nc_model), dtype=np.uint8) frame_probs_uint8 = np.zeros(
frame_probs = frame_probs_uint8[proj_mask, :].astype(np.float32) / 255 (proj_mask.shape[0], nc_model), dtype=np.uint8
frame_probs = test_smooth * frame_probs + (1 - test_smooth) * proj_probs )
frame_probs_uint8[proj_mask, :] = (frame_probs * 255).astype(np.uint8) frame_probs = (
frame_probs_uint8[proj_mask, :].astype(np.float32) / 255
)
frame_probs = (
test_smooth * frame_probs + (1 - test_smooth) * proj_probs
)
frame_probs_uint8[proj_mask, :] = (frame_probs * 255).astype(
np.uint8
)
np.save(filepath, frame_probs_uint8) np.save(filepath, frame_probs_uint8)
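
Frame probabilities are stored as uint8 (0-255) to keep the per-frame .npy files small, and dequantized back to float before blending in the next vote. A round-trip sketch of that quantization:

    import numpy as np

    probs = np.random.rand(4, 3).astype(np.float32)
    probs /= probs.sum(axis=1, keepdims=True)

    probs_uint8 = (probs * 255).astype(np.uint8)     # quantized copy saved to disk
    restored = probs_uint8.astype(np.float32) / 255  # dequantized before the next vote
    print(np.max(np.abs(restored - probs)))          # quantization error, below 1/255
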
# Save some predictions in ply format for visualization # Save some predictions in ply format for visualization
if test_loader.dataset.set == 'validation': if test_loader.dataset.set == "validation":
# Insert false columns for ignored labels # Insert false columns for ignored labels
frame_probs_uint8_bis = frame_probs_uint8.copy() frame_probs_uint8_bis = frame_probs_uint8.copy()
for l_ind, label_value in enumerate(test_loader.dataset.label_values): for l_ind, label_value in enumerate(
test_loader.dataset.label_values
):
if label_value in test_loader.dataset.ignored_labels: if label_value in test_loader.dataset.ignored_labels:
frame_probs_uint8_bis = np.insert(frame_probs_uint8_bis, l_ind, 0, axis=1) frame_probs_uint8_bis = np.insert(
frame_probs_uint8_bis, l_ind, 0, axis=1
)
# Predicted labels # Predicted labels
frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8_bis, frame_preds = test_loader.dataset.label_values[
axis=1)].astype(np.int32) np.argmax(frame_probs_uint8_bis, axis=1)
].astype(np.int32)
# Save some of the frame pots # Save some of the frame pots
if f_ind % 20 == 0: if f_ind % 20 == 0:
seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind]) seq_path = join(
velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin') test_loader.dataset.path,
"sequences",
test_loader.dataset.sequences[s_ind],
)
velo_file = join(
seq_path,
"velodyne",
test_loader.dataset.frames[s_ind][f_ind] + ".bin",
)
frame_points = np.fromfile(velo_file, dtype=np.float32) frame_points = np.fromfile(velo_file, dtype=np.float32)
frame_points = frame_points.reshape((-1, 4)) frame_points = frame_points.reshape((-1, 4))
predpath = join(test_path, pred_folder, filename[:-4] + '.ply') predpath = join(
#pots = test_loader.dataset.f_potentials[s_ind][f_ind] test_path, pred_folder, filename[:-4] + ".ply"
)
# pots = test_loader.dataset.f_potentials[s_ind][f_ind]
pots = np.zeros((0,)) pots = np.zeros((0,))
if pots.shape[0] > 0: if pots.shape[0] > 0:
write_ply(predpath, write_ply(
[frame_points[:, :3], frame_labels, frame_preds, pots], predpath,
['x', 'y', 'z', 'gt', 'pre', 'pots']) [
frame_points[:, :3],
frame_labels,
frame_preds,
pots,
],
["x", "y", "z", "gt", "pre", "pots"],
)
else: else:
write_ply(predpath, write_ply(
[frame_points[:, :3], frame_labels, frame_preds], predpath,
['x', 'y', 'z', 'gt', 'pre']) [frame_points[:, :3], frame_labels, frame_preds],
["x", "y", "z", "gt", "pre"],
)
# Also Save lbl probabilities # Also Save lbl probabilities
probpath = join(test_path, folder, filename[:-4] + '_probs.ply') probpath = join(
lbl_names = [test_loader.dataset.label_to_names[l] test_path, folder, filename[:-4] + "_probs.ply"
for l in test_loader.dataset.label_values )
if l not in test_loader.dataset.ignored_labels] lbl_names = [
write_ply(probpath, test_loader.dataset.label_to_names[l]
[frame_points[:, :3], frame_probs_uint8], for l in test_loader.dataset.label_values
['x', 'y', 'z'] + lbl_names) if l not in test_loader.dataset.ignored_labels
]
write_ply(
probpath,
[frame_points[:, :3], frame_probs_uint8],
["x", "y", "z"] + lbl_names,
)
# keep frame preds in memory # keep frame preds in memory
all_f_preds[s_ind][f_ind] = frame_preds all_f_preds[s_ind][f_ind] = frame_preds
all_f_labels[s_ind][f_ind] = frame_labels all_f_labels[s_ind][f_ind] = frame_labels
else: else:
# Save some of the frame preds # Save some of the frame preds
if f_inds[b_i, 1] % 100 == 0: if f_inds[b_i, 1] % 100 == 0:
# Insert false columns for ignored labels # Insert false columns for ignored labels
for l_ind, label_value in enumerate(test_loader.dataset.label_values): for l_ind, label_value in enumerate(
test_loader.dataset.label_values
):
if label_value in test_loader.dataset.ignored_labels: if label_value in test_loader.dataset.ignored_labels:
frame_probs_uint8 = np.insert(frame_probs_uint8, l_ind, 0, axis=1) frame_probs_uint8 = np.insert(
frame_probs_uint8, l_ind, 0, axis=1
)
# Predicted labels # Predicted labels
frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8, frame_preds = test_loader.dataset.label_values[
axis=1)].astype(np.int32) np.argmax(frame_probs_uint8, axis=1)
].astype(np.int32)
# Load points # Load points
seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind]) seq_path = join(
velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin') test_loader.dataset.path,
"sequences",
test_loader.dataset.sequences[s_ind],
)
velo_file = join(
seq_path,
"velodyne",
test_loader.dataset.frames[s_ind][f_ind] + ".bin",
)
frame_points = np.fromfile(velo_file, dtype=np.float32) frame_points = np.fromfile(velo_file, dtype=np.float32)
frame_points = frame_points.reshape((-1, 4)) frame_points = frame_points.reshape((-1, 4))
predpath = join(test_path, pred_folder, filename[:-4] + '.ply') predpath = join(
#pots = test_loader.dataset.f_potentials[s_ind][f_ind] test_path, pred_folder, filename[:-4] + ".ply"
)
# pots = test_loader.dataset.f_potentials[s_ind][f_ind]
pots = np.zeros((0,)) pots = np.zeros((0,))
if pots.shape[0] > 0: if pots.shape[0] > 0:
write_ply(predpath, write_ply(
[frame_points[:, :3], frame_preds, pots], predpath,
['x', 'y', 'z', 'pre', 'pots']) [frame_points[:, :3], frame_preds, pots],
["x", "y", "z", "pre", "pots"],
)
else: else:
write_ply(predpath, write_ply(
[frame_points[:, :3], frame_preds], predpath,
['x', 'y', 'z', 'pre']) [frame_points[:, :3], frame_preds],
["x", "y", "z", "pre"],
)
# Stack all prediction for this epoch # Stack all prediction for this epoch
i0 += length i0 += length
@ -675,30 +783,45 @@ class ModelTester:
# Display # Display
if (t[-1] - last_display) > 1.0: if (t[-1] - last_display) > 1.0:
last_display = t[-1] last_display = t[-1]
message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%' message = "e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%"
min_pot = int(torch.floor(torch.min(test_loader.dataset.potentials))) min_pot = int(
pot_num = torch.sum(test_loader.dataset.potentials > min_pot + 0.5).type(torch.int32).item() torch.floor(torch.min(test_loader.dataset.potentials))
current_num = pot_num + (i + 1 - config.validation_size) * config.val_batch_num )
print(message.format(test_epoch, i, pot_num = (
100 * i / config.validation_size, torch.sum(test_loader.dataset.potentials > min_pot + 0.5)
1000 * (mean_dt[0]), .type(torch.int32)
1000 * (mean_dt[1]), .item()
1000 * (mean_dt[2]), )
min_pot, current_num = (
100.0 * current_num / len(test_loader.dataset.potentials))) pot_num
+ (i + 1 - config.validation_size) * config.val_batch_num
)
print(
message.format(
test_epoch,
i,
100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
1000 * (mean_dt[2]),
min_pot,
100.0 * current_num / len(test_loader.dataset.potentials),
)
)
# Update minimum of potentials # Update minimum of potentials
new_min = torch.min(test_loader.dataset.potentials) new_min = torch.min(test_loader.dataset.potentials)
print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min)) print(
"Test epoch {:d}, end. Min potential = {:.1f}".format(
test_epoch, new_min
)
)
if last_min + 1 < new_min: if last_min + 1 < new_min:
# Update last_min # Update last_min
last_min += 1 last_min += 1
if test_loader.dataset.set == 'validation' and last_min % 1 == 0: if test_loader.dataset.set == "validation" and last_min % 1 == 0:
##################################### #####################################
# Results on the whole validation set # Results on the whole validation set
##################################### #####################################
@ -706,13 +829,13 @@ class ModelTester:
# Confusions for our subparts of validation set # Confusions for our subparts of validation set
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32) Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
for i, (preds, truth) in enumerate(zip(predictions, targets)): for i, (preds, truth) in enumerate(zip(predictions, targets)):
# Confusions # Confusions
Confs[i, :, :] = fast_confusion(truth, preds, test_loader.dataset.label_values).astype(np.int32) Confs[i, :, :] = fast_confusion(
truth, preds, test_loader.dataset.label_values
).astype(np.int32)
# Show vote results # Show vote results
print('\nCompute confusion') print("\nCompute confusion")
val_preds = [] val_preds = []
val_labels = [] val_labels = []
@ -723,21 +846,25 @@ class ModelTester:
val_preds = np.hstack(val_preds) val_preds = np.hstack(val_preds)
val_labels = np.hstack(val_labels) val_labels = np.hstack(val_labels)
t2 = time.time() t2 = time.time()
C_tot = fast_confusion(val_labels, val_preds, test_loader.dataset.label_values) C_tot = fast_confusion(
val_labels, val_preds, test_loader.dataset.label_values
)
t3 = time.time() t3 = time.time()
print(' Stacking time : {:.1f}s'.format(t2 - t1)) print(" Stacking time : {:.1f}s".format(t2 - t1))
print('Confusion time : {:.1f}s'.format(t3 - t2)) print("Confusion time : {:.1f}s".format(t3 - t2))
s1 = '\n' s1 = "\n"
for cc in C_tot: for cc in C_tot:
for c in cc: for c in cc:
s1 += '{:7.0f} '.format(c) s1 += "{:7.0f} ".format(c)
s1 += '\n' s1 += "\n"
if debug: if debug:
print(s1) print(s1)
# Remove ignored labels from confusions # Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))): for l_ind, label_value in reversed(
list(enumerate(test_loader.dataset.label_values))
):
if label_value in test_loader.dataset.ignored_labels: if label_value in test_loader.dataset.ignored_labels:
C_tot = np.delete(C_tot, l_ind, axis=0) C_tot = np.delete(C_tot, l_ind, axis=0)
C_tot = np.delete(C_tot, l_ind, axis=1) C_tot = np.delete(C_tot, l_ind, axis=1)
@ -747,21 +874,23 @@ class ModelTester:
# Compute IoUs # Compute IoUs
mIoU = np.mean(val_IoUs) mIoU = np.mean(val_IoUs)
s2 = '{:5.2f} | '.format(100 * mIoU) s2 = "{:5.2f} | ".format(100 * mIoU)
for IoU in val_IoUs: for IoU in val_IoUs:
s2 += '{:5.2f} '.format(100 * IoU) s2 += "{:5.2f} ".format(100 * IoU)
print(s2 + '\n') print(s2 + "\n")
# Save a report # Save a report
report_file = join(report_path, 'report_{:04d}.txt'.format(int(np.floor(last_min)))) report_file = join(
str = 'Report of the confusion and metrics\n' report_path, "report_{:04d}.txt".format(int(np.floor(last_min)))
str += '***********************************\n\n\n' )
str += 'Confusion matrix:\n\n' str = "Report of the confusion and metrics\n"
str += "***********************************\n\n\n"
str += "Confusion matrix:\n\n"
str += s1 str += s1
str += '\nIoU values:\n\n' str += "\nIoU values:\n\n"
str += s2 str += s2
str += '\n\n' str += "\n\n"
with open(report_file, 'w') as f: with open(report_file, "w") as f:
f.write(str) f.write(str)
test_epoch += 1 test_epoch += 1
@ -771,28 +900,3 @@ class ModelTester:
break break
return return


@ -24,24 +24,17 @@
# Basic libs # Basic libs
import torch import torch
import torch.nn as nn
import numpy as np import numpy as np
import pickle
import os
from os import makedirs, remove from os import makedirs, remove
from os.path import exists, join from os.path import exists, join
import time import time
import sys
# PLY reader # PLY reader
from utils.ply import read_ply, write_ply from utils.ply import write_ply
# Metrics # Metrics
from utils.metrics import IoU_from_confusions, fast_confusion from utils.metrics import IoU_from_confusions, fast_confusion
from utils.config import Config from utils.config import Config
from sklearn.neighbors import KDTree
from models.blocks import KPConv
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
@ -52,7 +45,6 @@ from models.blocks import KPConv
class ModelTrainer: class ModelTrainer:
# Initialization methods # Initialization methods
# ------------------------------------------------------------------------------------------------------------------ # ------------------------------------------------------------------------------------------------------------------
@ -75,14 +67,15 @@ class ModelTrainer:
self.step = 0 self.step = 0
# Optimizer with specific learning rate for deformable KPConv # Optimizer with specific learning rate for deformable KPConv
deform_params = [v for k, v in net.named_parameters() if 'offset' in k] deform_params = [v for k, v in net.named_parameters() if "offset" in k]
other_params = [v for k, v in net.named_parameters() if 'offset' not in k] other_params = [v for k, v in net.named_parameters() if "offset" not in k]
deform_lr = config.learning_rate * config.deform_lr_factor deform_lr = config.learning_rate * config.deform_lr_factor
self.optimizer = torch.optim.SGD([{'params': other_params}, self.optimizer = torch.optim.SGD(
{'params': deform_params, 'lr': deform_lr}], [{"params": other_params}, {"params": deform_params, "lr": deform_lr}],
lr=config.learning_rate, lr=config.learning_rate,
momentum=config.momentum, momentum=config.momentum,
weight_decay=config.weight_decay) weight_decay=config.weight_decay,
)
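torch.optim.SGD accepts a list of parameter groups, and each group can override the default hyperparameters; that is how the deformable-offset parameters above get their own learning rate. A self-contained sketch of the same pattern on a toy module (the module, factor and hyperparameter values are placeholders, not this repository's config):

    import torch
    import torch.nn as nn

    # Toy module standing in for the network; "offset" in a parameter name
    # marks the parameters that should train with a scaled learning rate.
    net = nn.ModuleDict({"conv": nn.Linear(8, 8), "offset_conv": nn.Linear(8, 8)})

    base_lr = 0.01
    deform_lr = base_lr * 0.1  # hypothetical deform_lr_factor of 0.1

    deform_params = [v for k, v in net.named_parameters() if "offset" in k]
    other_params = [v for k, v in net.named_parameters() if "offset" not in k]

    optimizer = torch.optim.SGD(
        [{"params": other_params}, {"params": deform_params, "lr": deform_lr}],
        lr=base_lr,  # default for groups that do not override it
        momentum=0.98,
        weight_decay=1e-3,
    )

    for group in optimizer.param_groups:
        print(group["lr"])  # 0.01 for the first group, 0.001 for the offsets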
# Choose to train on CPU or GPU # Choose to train on CPU or GPU
if on_gpu and torch.cuda.is_available(): if on_gpu and torch.cuda.is_available():
@ -95,24 +88,26 @@ class ModelTrainer:
# Load previous checkpoint # Load previous checkpoint
########################## ##########################
if (chkp_path is not None): if chkp_path is not None:
if finetune: if finetune:
checkpoint = torch.load(chkp_path) checkpoint = torch.load(chkp_path)
net.load_state_dict(checkpoint['model_state_dict']) net.load_state_dict(checkpoint["model_state_dict"])
net.train() net.train()
print("Model restored and ready for finetuning.") print("Model restored and ready for finetuning.")
else: else:
checkpoint = torch.load(chkp_path) checkpoint = torch.load(chkp_path)
net.load_state_dict(checkpoint['model_state_dict']) net.load_state_dict(checkpoint["model_state_dict"])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
self.epoch = checkpoint['epoch'] self.epoch = checkpoint["epoch"]
net.train() net.train()
print("Model and training state restored.") print("Model and training state restored.")
# Path of the result folder # Path of the result folder
if config.saving: if config.saving:
if config.saving_path is None: if config.saving_path is None:
config.saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime()) config.saving_path = time.strftime(
"results/Log_%Y-%m-%d_%H-%M-%S", time.gmtime()
)
if not exists(config.saving_path): if not exists(config.saving_path):
makedirs(config.saving_path) makedirs(config.saving_path)
config.save() config.save()
@ -133,17 +128,17 @@ class ModelTrainer:
if config.saving: if config.saving:
# Training log file # Training log file
with open(join(config.saving_path, 'training.txt'), "w") as file: with open(join(config.saving_path, "training.txt"), "w") as file:
file.write('epochs steps out_loss offset_loss train_accuracy time\n') file.write("epochs steps out_loss offset_loss train_accuracy time\n")
# Killing file (simply delete this file when you want to stop the training) # Killing file (simply delete this file when you want to stop the training)
PID_file = join(config.saving_path, 'running_PID.txt') PID_file = join(config.saving_path, "running_PID.txt")
if not exists(PID_file): if not exists(PID_file):
with open(PID_file, "w") as file: with open(PID_file, "w") as file:
file.write('Launched with PyCharm') file.write("Launched with PyCharm")
# Checkpoints directory # Checkpoints directory
checkpoint_directory = join(config.saving_path, 'checkpoints') checkpoint_directory = join(config.saving_path, "checkpoints")
if not exists(checkpoint_directory): if not exists(checkpoint_directory):
makedirs(checkpoint_directory) makedirs(checkpoint_directory)
else: else:
@ -158,14 +153,12 @@ class ModelTrainer:
# Start training loop # Start training loop
for epoch in range(config.max_epoch): for epoch in range(config.max_epoch):
# Remove File for kill signal # Remove File for kill signal
if epoch == config.max_epoch - 1 and exists(PID_file): if epoch == config.max_epoch - 1 and exists(PID_file):
remove(PID_file) remove(PID_file)
self.step = 0 self.step = 0
for batch in training_loader: for batch in training_loader:
# Check kill signal (running_PID.txt deleted) # Check kill signal (running_PID.txt deleted)
if config.saving and not exists(PID_file): if config.saving and not exists(PID_file):
continue continue
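The running_PID.txt file written above acts as a simple kill switch: training continues only while the file exists, so deleting it from a shell stops the run cleanly at the next batch. A minimal sketch of the mechanism with a dummy loop (the path and the capped step count are only there to keep the example self-contained):

    import time
    from os import makedirs
    from os.path import exists, join

    saving_path = "/tmp/demo_run"  # hypothetical results folder
    makedirs(saving_path, exist_ok=True)

    # Create the kill file; delete it from another terminal to stop the loop.
    PID_file = join(saving_path, "running_PID.txt")
    with open(PID_file, "w") as f:
        f.write("Launched\n")

    step = 0
    while exists(PID_file):
        # ... one training step would go here ...
        step += 1
        time.sleep(0.1)
        if step >= 20:  # safety stop so the sketch always terminates
            break

    print("Stopped after {:d} steps".format(step))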
@ -178,7 +171,7 @@ class ModelTrainer:
t = t[-1:] t = t[-1:]
t += [time.time()] t += [time.time()]
if 'cuda' in self.device.type: if "cuda" in self.device.type:
batch.to(self.device) batch.to(self.device)
# zero the parameter gradients # zero the parameter gradients
@ -195,11 +188,12 @@ class ModelTrainer:
loss.backward() loss.backward()
if config.grad_clip_norm > 0: if config.grad_clip_norm > 0:
#torch.nn.utils.clip_grad_norm_(net.parameters(), config.grad_clip_norm) # torch.nn.utils.clip_grad_norm_(net.parameters(), config.grad_clip_norm)
torch.nn.utils.clip_grad_value_(net.parameters(), config.grad_clip_norm) torch.nn.utils.clip_grad_value_(
net.parameters(), config.grad_clip_norm
)
self.optimizer.step() self.optimizer.step()
torch.cuda.empty_cache() torch.cuda.empty_cache()
torch.cuda.synchronize(self.device) torch.cuda.synchronize(self.device)
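The commented-out line above marks the switch from norm-based to value-based clipping: clip_grad_norm_ rescales the whole gradient vector when its norm exceeds a threshold, while clip_grad_value_ clamps every gradient element into [-clip, clip] independently. A small sketch comparing the two calls on a hand-set gradient (values are arbitrary):

    import torch
    import torch.nn as nn

    def make_param():
        p = nn.Parameter(torch.zeros(3))
        p.grad = torch.tensor([3.0, -4.0, 0.5])  # hand-set gradient for the demo
        return p

    # Value clipping: each component is clamped independently to [-1, 1].
    p1 = make_param()
    torch.nn.utils.clip_grad_value_([p1], 1.0)
    print(p1.grad)  # tensor([ 1.0000, -1.0000,  0.5000])

    # Norm clipping: the whole gradient is rescaled so its L2 norm is <= 1.
    p2 = make_param()
    torch.nn.utils.clip_grad_norm_([p2], 1.0)
    print(p2.grad.norm())  # ~1.0, direction preserved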
@ -214,25 +208,33 @@ class ModelTrainer:
# Console display (only one per second) # Console display (only one per second)
if (t[-1] - last_display) > 1.0: if (t[-1] - last_display) > 1.0:
last_display = t[-1] last_display = t[-1]
message = 'e{:03d}-i{:04d} => L={:.3f} acc={:3.0f}% / t(ms): {:5.1f} {:5.1f} {:5.1f})' message = "e{:03d}-i{:04d} => L={:.3f} acc={:3.0f}% / t(ms): {:5.1f} {:5.1f} {:5.1f})"
print(message.format(self.epoch, self.step, print(
loss.item(), message.format(
100*acc, self.epoch,
1000 * mean_dt[0], self.step,
1000 * mean_dt[1], loss.item(),
1000 * mean_dt[2])) 100 * acc,
1000 * mean_dt[0],
1000 * mean_dt[1],
1000 * mean_dt[2],
)
)
# Log file # Log file
if config.saving: if config.saving:
with open(join(config.saving_path, 'training.txt'), "a") as file: with open(join(config.saving_path, "training.txt"), "a") as file:
message = '{:d} {:d} {:.3f} {:.3f} {:.3f} {:.3f}\n' message = "{:d} {:d} {:.3f} {:.3f} {:.3f} {:.3f}\n"
file.write(message.format(self.epoch, file.write(
self.step, message.format(
net.output_loss, self.epoch,
net.reg_loss, self.step,
acc, net.output_loss,
t[-1] - t0)) net.reg_loss,
acc,
t[-1] - t0,
)
)
self.step += 1 self.step += 1
@ -247,7 +249,7 @@ class ModelTrainer:
# Update learning rate # Update learning rate
if self.epoch in config.lr_decays: if self.epoch in config.lr_decays:
for param_group in self.optimizer.param_groups: for param_group in self.optimizer.param_groups:
param_group['lr'] *= config.lr_decays[self.epoch] param_group["lr"] *= config.lr_decays[self.epoch]
# Update epoch # Update epoch
self.epoch += 1 self.epoch += 1
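Multiplying param_group["lr"] in place, as above, is the manual equivalent of a multiplicative learning-rate schedule keyed on epoch numbers. A short sketch of the same idea (the decay table, optimizer and loop are illustrative, not the project's values):

    import torch
    import torch.nn as nn

    net = nn.Linear(4, 4)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

    # Hypothetical decay table: multiply the lr by 0.1 at epochs 10 and 20.
    lr_decays = {10: 0.1, 20: 0.1}

    for epoch in range(30):
        # ... one epoch of training would go here ...
        if epoch in lr_decays:
            for param_group in optimizer.param_groups:
                param_group["lr"] *= lr_decays[epoch]

    print(optimizer.param_groups[0]["lr"])  # 0.01 * 0.1 * 0.1 = 1e-4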
@ -255,18 +257,22 @@ class ModelTrainer:
# Saving # Saving
if config.saving: if config.saving:
# Get current state dict # Get current state dict
save_dict = {'epoch': self.epoch, save_dict = {
'model_state_dict': net.state_dict(), "epoch": self.epoch,
'optimizer_state_dict': self.optimizer.state_dict(), "model_state_dict": net.state_dict(),
'saving_path': config.saving_path} "optimizer_state_dict": self.optimizer.state_dict(),
"saving_path": config.saving_path,
}
# Save current state of the network (for restoring purposes) # Save current state of the network (for restoring purposes)
checkpoint_path = join(checkpoint_directory, 'current_chkp.tar') checkpoint_path = join(checkpoint_directory, "current_chkp.tar")
torch.save(save_dict, checkpoint_path) torch.save(save_dict, checkpoint_path)
# Save checkpoints occasionally # Save checkpoints occasionally
if (self.epoch + 1) % config.checkpoint_gap == 0: if (self.epoch + 1) % config.checkpoint_gap == 0:
checkpoint_path = join(checkpoint_directory, 'chkp_{:04d}.tar'.format(self.epoch + 1)) checkpoint_path = join(
checkpoint_directory, "chkp_{:04d}.tar".format(self.epoch + 1)
)
torch.save(save_dict, checkpoint_path) torch.save(save_dict, checkpoint_path)
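The checkpoint saved above is an ordinary dict holding the epoch counter plus the model and optimizer state_dicts, so restoring (as in the finetune/resume branch earlier in this file) is symmetric: load the dict and feed each entry back. A minimal save/restore round trip on a toy model (the path and model are placeholders):

    import torch
    import torch.nn as nn

    net = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
    epoch = 5

    # Save the same kind of dict as above.
    save_dict = {
        "epoch": epoch,
        "model_state_dict": net.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
    }
    torch.save(save_dict, "/tmp/current_chkp.tar")

    # Restore into fresh objects.
    net2 = nn.Linear(4, 2)
    optimizer2 = torch.optim.SGD(net2.parameters(), lr=0.01)
    checkpoint = torch.load("/tmp/current_chkp.tar")
    net2.load_state_dict(checkpoint["model_state_dict"])
    optimizer2.load_state_dict(checkpoint["optimizer_state_dict"])
    print("resumed at epoch", checkpoint["epoch"])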
# Validation # Validation
@ -274,24 +280,23 @@ class ModelTrainer:
self.validation(net, val_loader, config) self.validation(net, val_loader, config)
net.train() net.train()
print('Finished Training') print("Finished Training")
return return
# Validation methods # Validation methods
# ------------------------------------------------------------------------------------------------------------------ # ------------------------------------------------------------------------------------------------------------------
def validation(self, net, val_loader, config: Config): def validation(self, net, val_loader, config: Config):
if config.dataset_task == "classification":
if config.dataset_task == 'classification':
self.object_classification_validation(net, val_loader, config) self.object_classification_validation(net, val_loader, config)
elif config.dataset_task == 'segmentation': elif config.dataset_task == "segmentation":
self.object_segmentation_validation(net, val_loader, config) self.object_segmentation_validation(net, val_loader, config)
elif config.dataset_task == 'cloud_segmentation': elif config.dataset_task == "cloud_segmentation":
self.cloud_segmentation_validation(net, val_loader, config) self.cloud_segmentation_validation(net, val_loader, config)
elif config.dataset_task == 'slam_segmentation': elif config.dataset_task == "slam_segmentation":
self.slam_segmentation_validation(net, val_loader, config) self.slam_segmentation_validation(net, val_loader, config)
else: else:
raise ValueError('No validation method implemented for this network type') raise ValueError("No validation method implemented for this network type")
def object_classification_validation(self, net, val_loader, config): def object_classification_validation(self, net, val_loader, config):
""" """
@ -313,7 +318,7 @@ class ModelTrainer:
softmax = torch.nn.Softmax(1) softmax = torch.nn.Softmax(1)
# Initialize global prediction over all models # Initialize global prediction over all models
if not hasattr(self, 'val_probs'): if not hasattr(self, "val_probs"):
self.val_probs = np.zeros((val_loader.dataset.num_models, nc_model)) self.val_probs = np.zeros((val_loader.dataset.num_models, nc_model))
##################### #####################
@ -330,12 +335,11 @@ class ModelTrainer:
# Start validation loop # Start validation loop
for batch in val_loader: for batch in val_loader:
# New time # New time
t = t[-1:] t = t[-1:]
t += [time.time()] t += [time.time()]
if 'cuda' in self.device.type: if "cuda" in self.device.type:
batch.to(self.device) batch.to(self.device)
# Forward pass # Forward pass
@ -354,10 +358,14 @@ class ModelTrainer:
# Display # Display
if (t[-1] - last_display) > 1.0: if (t[-1] - last_display) > 1.0:
last_display = t[-1] last_display = t[-1]
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})' message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})"
print(message.format(100 * len(obj_inds) / config.validation_size, print(
1000 * (mean_dt[0]), message.format(
1000 * (mean_dt[1]))) 100 * len(obj_inds) / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
)
)
# Stack all validation predictions # Stack all validation predictions
probs = np.vstack(probs) probs = np.vstack(probs)
@ -368,7 +376,9 @@ class ModelTrainer:
# Voting validation # Voting validation
################### ###################
self.val_probs[obj_inds] = val_smooth * self.val_probs[obj_inds] + (1-val_smooth) * probs self.val_probs[obj_inds] = (
val_smooth * self.val_probs[obj_inds] + (1 - val_smooth) * probs
)
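The update above is an exponential moving average: with val_smooth = 0.95, each validation pass keeps 95% of the accumulated class probabilities and blends in 5% of the new predictions, which smooths the voting across epochs. A tiny numeric sketch of the accumulation (array sizes and values are arbitrary):

    import numpy as np

    val_smooth = 0.95
    rng = np.random.default_rng(0)

    # Accumulated probabilities for 4 objects over 3 classes, updated 10 times.
    val_probs = np.zeros((4, 3))
    for _ in range(10):
        new_probs = rng.random((4, 3))
        new_probs /= new_probs.sum(axis=1, keepdims=True)  # rows sum to 1
        val_probs = val_smooth * val_probs + (1 - val_smooth) * new_probs

    # Each row sums to 1 - 0.95**10 (about 0.40) and tends to 1 as votes accumulate.
    print(val_probs.sum(axis=1))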
############ ############
# Confusions # Confusions
@ -377,39 +387,38 @@ class ModelTrainer:
validation_labels = np.array(val_loader.dataset.label_values) validation_labels = np.array(val_loader.dataset.label_values)
# Compute classification results # Compute classification results
C1 = fast_confusion(targets, C1 = fast_confusion(targets, np.argmax(probs, axis=1), validation_labels)
np.argmax(probs, axis=1),
validation_labels)
# Compute votes confusion # Compute votes confusion
C2 = fast_confusion(val_loader.dataset.input_labels, C2 = fast_confusion(
np.argmax(self.val_probs, axis=1), val_loader.dataset.input_labels,
validation_labels) np.argmax(self.val_probs, axis=1),
validation_labels,
)
# Saving (optional) # Saving (optional)
if config.saving: if config.saving:
print("Save confusions") print("Save confusions")
conf_list = [C1, C2] conf_list = [C1, C2]
file_list = ['val_confs.txt', 'vote_confs.txt'] file_list = ["val_confs.txt", "vote_confs.txt"]
for conf, conf_file in zip(conf_list, file_list): for conf, conf_file in zip(conf_list, file_list):
test_file = join(config.saving_path, conf_file) test_file = join(config.saving_path, conf_file)
if exists(test_file): if exists(test_file):
with open(test_file, "a") as text_file: with open(test_file, "a") as text_file:
for line in conf: for line in conf:
for value in line: for value in line:
text_file.write('%d ' % value) text_file.write("%d " % value)
text_file.write('\n') text_file.write("\n")
else: else:
with open(test_file, "w") as text_file: with open(test_file, "w") as text_file:
for line in conf: for line in conf:
for value in line: for value in line:
text_file.write('%d ' % value) text_file.write("%d " % value)
text_file.write('\n') text_file.write("\n")
val_ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6) val_ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)
vote_ACC = 100 * np.sum(np.diag(C2)) / (np.sum(C2) + 1e-6) vote_ACC = 100 * np.sum(np.diag(C2)) / (np.sum(C2) + 1e-6)
print('Accuracies : val = {:.1f}% / vote = {:.1f}%'.format(val_ACC, vote_ACC)) print("Accuracies : val = {:.1f}% / vote = {:.1f}%".format(val_ACC, vote_ACC))
return C1 return C1
@ -438,19 +447,25 @@ class ModelTrainer:
# Number of classes predicted by the model # Number of classes predicted by the model
nc_model = config.num_classes nc_model = config.num_classes
#print(nc_tot) # print(nc_tot)
#print(nc_model) # print(nc_model)
# Initiate global prediction over validation clouds # Initiate global prediction over validation clouds
if not hasattr(self, 'validation_probs'): if not hasattr(self, "validation_probs"):
self.validation_probs = [np.zeros((l.shape[0], nc_model)) self.validation_probs = [
for l in val_loader.dataset.input_labels] np.zeros((l.shape[0], nc_model))
for l in val_loader.dataset.input_labels
]
self.val_proportions = np.zeros(nc_model, dtype=np.float32) self.val_proportions = np.zeros(nc_model, dtype=np.float32)
i = 0 i = 0
for label_value in val_loader.dataset.label_values: for label_value in val_loader.dataset.label_values:
if label_value not in val_loader.dataset.ignored_labels: if label_value not in val_loader.dataset.ignored_labels:
self.val_proportions[i] = np.sum([np.sum(labels == label_value) self.val_proportions[i] = np.sum(
for labels in val_loader.dataset.validation_labels]) [
np.sum(labels == label_value)
for labels in val_loader.dataset.validation_labels
]
)
i += 1 i += 1
##################### #####################
@ -464,17 +479,15 @@ class ModelTrainer:
last_display = time.time() last_display = time.time()
mean_dt = np.zeros(1) mean_dt = np.zeros(1)
t1 = time.time() t1 = time.time()
# Start validation loop # Start validation loop
for i, batch in enumerate(val_loader): for i, batch in enumerate(val_loader):
# New time # New time
t = t[-1:] t = t[-1:]
t += [time.time()] t += [time.time()]
if 'cuda' in self.device.type: if "cuda" in self.device.type:
batch.to(self.device) batch.to(self.device)
# Forward pass # Forward pass
@ -493,16 +506,17 @@ class ModelTrainer:
i0 = 0 i0 = 0
for b_i, length in enumerate(lengths): for b_i, length in enumerate(lengths):
# Get prediction # Get prediction
target = labels[i0:i0 + length] target = labels[i0 : i0 + length]
probs = stacked_probs[i0:i0 + length] probs = stacked_probs[i0 : i0 + length]
inds = in_inds[i0:i0 + length] inds = in_inds[i0 : i0 + length]
c_i = cloud_inds[b_i] c_i = cloud_inds[b_i]
# Update current probs in whole cloud # Update current probs in whole cloud
self.validation_probs[c_i][inds] = val_smooth * self.validation_probs[c_i][inds] \ self.validation_probs[c_i][inds] = (
+ (1 - val_smooth) * probs val_smooth * self.validation_probs[c_i][inds]
+ (1 - val_smooth) * probs
)
# Stack all prediction for this epoch # Stack all prediction for this epoch
predictions.append(probs) predictions.append(probs)
@ -516,17 +530,20 @@ class ModelTrainer:
# Display # Display
if (t[-1] - last_display) > 1.0: if (t[-1] - last_display) > 1.0:
last_display = t[-1] last_display = t[-1]
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})' message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})"
print(message.format(100 * i / config.validation_size, print(
1000 * (mean_dt[0]), message.format(
1000 * (mean_dt[1]))) 100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
)
)
t2 = time.time() t2 = time.time()
# Confusions for our subparts of validation set # Confusions for our subparts of validation set
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32) Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
for i, (probs, truth) in enumerate(zip(predictions, targets)): for i, (probs, truth) in enumerate(zip(predictions, targets)):
# Insert false columns for ignored labels # Insert false columns for ignored labels
for l_ind, label_value in enumerate(val_loader.dataset.label_values): for l_ind, label_value in enumerate(val_loader.dataset.label_values):
if label_value in val_loader.dataset.ignored_labels: if label_value in val_loader.dataset.ignored_labels:
@ -536,8 +553,9 @@ class ModelTrainer:
preds = val_loader.dataset.label_values[np.argmax(probs, axis=1)] preds = val_loader.dataset.label_values[np.argmax(probs, axis=1)]
# Confusions # Confusions
Confs[i, :, :] = fast_confusion(truth, preds, val_loader.dataset.label_values).astype(np.int32) Confs[i, :, :] = fast_confusion(
truth, preds, val_loader.dataset.label_values
).astype(np.int32)
t3 = time.time() t3 = time.time()
@ -545,7 +563,9 @@ class ModelTrainer:
C = np.sum(Confs, axis=0).astype(np.float32) C = np.sum(Confs, axis=0).astype(np.float32)
# Remove ignored labels from confusions # Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))): for l_ind, label_value in reversed(
list(enumerate(val_loader.dataset.label_values))
):
if label_value in val_loader.dataset.ignored_labels: if label_value in val_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0) C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1) C = np.delete(C, l_ind, axis=1)
@ -553,7 +573,6 @@ class ModelTrainer:
# Balance with real validation proportions # Balance with real validation proportions
C *= np.expand_dims(self.val_proportions / (np.sum(C, axis=1) + 1e-6), 1) C *= np.expand_dims(self.val_proportions / (np.sum(C, axis=1) + 1e-6), 1)
t4 = time.time() t4 = time.time()
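Two things happen around this point: the summed confusion matrix is row-rescaled so that every true class carries its real share of the validation points, and per-class IoUs are then derived from it. IoU_from_confusions is a project utility imported above; the sketch below assumes it follows the standard formula, diagonal over (row sum + column sum - diagonal):

    import numpy as np

    # Hypothetical summed confusion matrix (rows = ground truth, cols = prediction).
    C = np.array([[80.0, 10.0, 10.0],
                  [ 5.0, 90.0,  5.0],
                  [10.0, 20.0, 70.0]])

    # Rescale rows so each true class matches its real proportion in the full
    # validation set (proportions here are made up).
    val_proportions = np.array([1000.0, 500.0, 2000.0])
    C *= np.expand_dims(val_proportions / (np.sum(C, axis=1) + 1e-6), 1)

    # Per-class IoU: true positives over the union of predictions and ground truth.
    TP = np.diag(C)
    IoUs = TP / (np.sum(C, axis=0) + np.sum(C, axis=1) - TP + 1e-6)
    print(IoUs, IoUs.mean())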
# Objects IoU # Objects IoU
@ -563,15 +582,14 @@ class ModelTrainer:
# Saving (optional) # Saving (optional)
if config.saving: if config.saving:
# Name of saving file # Name of saving file
test_file = join(config.saving_path, 'val_IoUs.txt') test_file = join(config.saving_path, "val_IoUs.txt")
# Line to write: # Line to write:
line = '' line = ""
for IoU in IoUs: for IoU in IoUs:
line += '{:.3f} '.format(IoU) line += "{:.3f} ".format(IoU)
line = line + '\n' line = line + "\n"
# Write in file # Write in file
if exists(test_file): if exists(test_file):
@ -583,33 +601,36 @@ class ModelTrainer:
# Save potentials # Save potentials
if val_loader.dataset.use_potentials: if val_loader.dataset.use_potentials:
pot_path = join(config.saving_path, 'potentials') pot_path = join(config.saving_path, "potentials")
if not exists(pot_path): if not exists(pot_path):
makedirs(pot_path) makedirs(pot_path)
files = val_loader.dataset.files files = val_loader.dataset.files
for i, file_path in enumerate(files): for i, file_path in enumerate(files):
pot_points = np.array(val_loader.dataset.pot_trees[i].data, copy=False) pot_points = np.array(
cloud_name = file_path.split('/')[-1] val_loader.dataset.pot_trees[i].data, copy=False
)
cloud_name = file_path.split("/")[-1]
pot_name = join(pot_path, cloud_name) pot_name = join(pot_path, cloud_name)
pots = val_loader.dataset.potentials[i].numpy().astype(np.float32) pots = val_loader.dataset.potentials[i].numpy().astype(np.float32)
write_ply(pot_name, write_ply(
[pot_points.astype(np.float32), pots], pot_name,
['x', 'y', 'z', 'pots']) [pot_points.astype(np.float32), pots],
["x", "y", "z", "pots"],
)
t6 = time.time() t6 = time.time()
# Print instance mean # Print instance mean
mIoU = 100 * np.mean(IoUs) mIoU = 100 * np.mean(IoUs)
print('{:s} mean IoU = {:.1f}%'.format(config.dataset, mIoU)) print("{:s} mean IoU = {:.1f}%".format(config.dataset, mIoU))
# Save predicted cloud occasionally # Save predicted cloud occasionally
if config.saving and (self.epoch + 1) % config.checkpoint_gap == 0: if config.saving and (self.epoch + 1) % config.checkpoint_gap == 0:
val_path = join(config.saving_path, 'val_preds_{:d}'.format(self.epoch + 1)) val_path = join(config.saving_path, "val_preds_{:d}".format(self.epoch + 1))
if not exists(val_path): if not exists(val_path):
makedirs(val_path) makedirs(val_path)
files = val_loader.dataset.files files = val_loader.dataset.files
for i, file_path in enumerate(files): for i, file_path in enumerate(files):
# Get points # Get points
points = val_loader.dataset.load_evaluation_points(file_path) points = val_loader.dataset.load_evaluation_points(file_path)
@ -622,34 +643,36 @@ class ModelTrainer:
sub_probs = np.insert(sub_probs, l_ind, 0, axis=1) sub_probs = np.insert(sub_probs, l_ind, 0, axis=1)
# Get the predicted labels # Get the predicted labels
sub_preds = val_loader.dataset.label_values[np.argmax(sub_probs, axis=1).astype(np.int32)] sub_preds = val_loader.dataset.label_values[
np.argmax(sub_probs, axis=1).astype(np.int32)
]
# Reproject preds on the evaluation points # Reproject preds on the evaluation points
preds = (sub_preds[val_loader.dataset.test_proj[i]]).astype(np.int32) preds = (sub_preds[val_loader.dataset.test_proj[i]]).astype(np.int32)
# Path of saved validation file # Path of saved validation file
cloud_name = file_path.split('/')[-1] cloud_name = file_path.split("/")[-1]
val_name = join(val_path, cloud_name) val_name = join(val_path, cloud_name)
# Save file # Save file
labels = val_loader.dataset.validation_labels[i].astype(np.int32) labels = val_loader.dataset.validation_labels[i].astype(np.int32)
write_ply(val_name, write_ply(
[points, preds, labels], val_name, [points, preds, labels], ["x", "y", "z", "preds", "class"]
['x', 'y', 'z', 'preds', 'class']) )
# Display timings # Display timings
t7 = time.time() t7 = time.time()
if debug: if debug:
print('\n************************\n') print("\n************************\n")
print('Validation timings:') print("Validation timings:")
print('Init ...... {:.1f}s'.format(t1 - t0)) print("Init ...... {:.1f}s".format(t1 - t0))
print('Loop ...... {:.1f}s'.format(t2 - t1)) print("Loop ...... {:.1f}s".format(t2 - t1))
print('Confs ..... {:.1f}s'.format(t3 - t2)) print("Confs ..... {:.1f}s".format(t3 - t2))
print('Confs bis . {:.1f}s'.format(t4 - t3)) print("Confs bis . {:.1f}s".format(t4 - t3))
print('IoU ....... {:.1f}s'.format(t5 - t4)) print("IoU ....... {:.1f}s".format(t5 - t4))
print('Save1 ..... {:.1f}s'.format(t6 - t5)) print("Save1 ..... {:.1f}s".format(t6 - t5))
print('Save2 ..... {:.1f}s'.format(t7 - t6)) print("Save2 ..... {:.1f}s".format(t7 - t6))
print('\n************************\n') print("\n************************\n")
return return
@ -669,12 +692,11 @@ class ModelTrainer:
return return
# Choose validation smoothing parameter (0 for no smoothing, 0.99 for big smoothing) # Choose validation smoothing parameter (0 for no smoothing, 0.99 for big smoothing)
val_smooth = 0.95
softmax = torch.nn.Softmax(1) softmax = torch.nn.Softmax(1)
# Create folder for validation predictions # Create folder for validation predictions
if not exists (join(config.saving_path, 'val_preds')): if not exists(join(config.saving_path, "val_preds")):
makedirs(join(config.saving_path, 'val_preds')) makedirs(join(config.saving_path, "val_preds"))
# initiate the dataset validation containers # initiate the dataset validation containers
val_loader.dataset.val_points = [] val_loader.dataset.val_points = []
@ -696,17 +718,15 @@ class ModelTrainer:
last_display = time.time() last_display = time.time()
mean_dt = np.zeros(1) mean_dt = np.zeros(1)
t1 = time.time() t1 = time.time()
# Start validation loop # Start validation loop
for i, batch in enumerate(val_loader): for i, batch in enumerate(val_loader):
# New time # New time
t = t[-1:] t = t[-1:]
t += [time.time()] t += [time.time()]
if 'cuda' in self.device.type: if "cuda" in self.device.type:
batch.to(self.device) batch.to(self.device)
# Forward pass # Forward pass
@ -726,9 +746,8 @@ class ModelTrainer:
i0 = 0 i0 = 0
for b_i, length in enumerate(lengths): for b_i, length in enumerate(lengths):
# Get prediction # Get prediction
probs = stk_probs[i0:i0 + length] probs = stk_probs[i0 : i0 + length]
proj_inds = r_inds_list[b_i] proj_inds = r_inds_list[b_i]
proj_mask = r_mask_list[b_i] proj_mask = r_mask_list[b_i]
frame_labels = labels_list[b_i] frame_labels = labels_list[b_i]
@ -751,8 +770,10 @@ class ModelTrainer:
preds = val_loader.dataset.label_values[np.argmax(proj_probs, axis=1)] preds = val_loader.dataset.label_values[np.argmax(proj_probs, axis=1)]
# Save predictions in a binary file # Save predictions in a binary file
filename = '{:s}_{:07d}.npy'.format(val_loader.dataset.sequences[s_ind], f_ind) filename = "{:s}_{:07d}.npy".format(
filepath = join(config.saving_path, 'val_preds', filename) val_loader.dataset.sequences[s_ind], f_ind
)
filepath = join(config.saving_path, "val_preds", filename)
if exists(filepath): if exists(filepath):
frame_preds = np.load(filepath) frame_preds = np.load(filepath)
else: else:
@ -762,18 +783,30 @@ class ModelTrainer:
# Save some of the frame pots # Save some of the frame pots
if f_ind % 20 == 0: if f_ind % 20 == 0:
seq_path = join(val_loader.dataset.path, 'sequences', val_loader.dataset.sequences[s_ind]) seq_path = join(
velo_file = join(seq_path, 'velodyne', val_loader.dataset.frames[s_ind][f_ind] + '.bin') val_loader.dataset.path,
"sequences",
val_loader.dataset.sequences[s_ind],
)
velo_file = join(
seq_path,
"velodyne",
val_loader.dataset.frames[s_ind][f_ind] + ".bin",
)
frame_points = np.fromfile(velo_file, dtype=np.float32) frame_points = np.fromfile(velo_file, dtype=np.float32)
frame_points = frame_points.reshape((-1, 4)) frame_points = frame_points.reshape((-1, 4))
write_ply(filepath[:-4] + '_pots.ply', write_ply(
[frame_points[:, :3], frame_labels, frame_preds], filepath[:-4] + "_pots.ply",
['x', 'y', 'z', 'gt', 'pre']) [frame_points[:, :3], frame_labels, frame_preds],
["x", "y", "z", "gt", "pre"],
)
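The np.fromfile / reshape((-1, 4)) pair above relies on the SemanticKITTI velodyne layout, where every point is stored as four consecutive float32 values (x, y, z and remission). A round-trip sketch with a fake scan (the path and point count are made up):

    import numpy as np

    # Write a tiny fake scan: 5 points, four float32 values each (x, y, z, remission).
    fake_scan = np.random.rand(5, 4).astype(np.float32)
    fake_scan.tofile("/tmp/000000.bin")

    # Read it back the same way as above: a flat float32 buffer reshaped to N x 4.
    frame_points = np.fromfile("/tmp/000000.bin", dtype=np.float32)
    frame_points = frame_points.reshape((-1, 4))
    print(frame_points.shape)  # (5, 4); columns 0..2 are x, y, z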
# Update validation confusions # Update validation confusions
frame_C = fast_confusion(frame_labels, frame_C = fast_confusion(
frame_preds.astype(np.int32), frame_labels,
val_loader.dataset.label_values) frame_preds.astype(np.int32),
val_loader.dataset.label_values,
)
val_loader.dataset.val_confs[s_ind][f_ind, :, :] = frame_C val_loader.dataset.val_confs[s_ind][f_ind, :, :] = frame_C
# Stack all prediction for this epoch # Stack all prediction for this epoch
@ -790,19 +823,24 @@ class ModelTrainer:
# Display # Display
if (t[-1] - last_display) > 1.0: if (t[-1] - last_display) > 1.0:
last_display = t[-1] last_display = t[-1]
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})' message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})"
print(message.format(100 * i / config.validation_size, print(
1000 * (mean_dt[0]), message.format(
1000 * (mean_dt[1]))) 100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
)
)
t2 = time.time() t2 = time.time()
# Confusions for our subparts of validation set # Confusions for our subparts of validation set
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32) Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
for i, (preds, truth) in enumerate(zip(predictions, targets)): for i, (preds, truth) in enumerate(zip(predictions, targets)):
# Confusions # Confusions
Confs[i, :, :] = fast_confusion(truth, preds, val_loader.dataset.label_values).astype(np.int32) Confs[i, :, :] = fast_confusion(
truth, preds, val_loader.dataset.label_values
).astype(np.int32)
t3 = time.time() t3 = time.time()
@ -814,10 +852,14 @@ class ModelTrainer:
C = np.sum(Confs, axis=0).astype(np.float32) C = np.sum(Confs, axis=0).astype(np.float32)
# Balance with real validation proportions # Balance with real validation proportions
C *= np.expand_dims(val_loader.dataset.class_proportions / (np.sum(C, axis=1) + 1e-6), 1) C *= np.expand_dims(
val_loader.dataset.class_proportions / (np.sum(C, axis=1) + 1e-6), 1
)
# Remove ignored labels from confusions # Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))): for l_ind, label_value in reversed(
list(enumerate(val_loader.dataset.label_values))
):
if label_value in val_loader.dataset.ignored_labels: if label_value in val_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0) C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1) C = np.delete(C, l_ind, axis=1)
@ -832,19 +874,25 @@ class ModelTrainer:
t4 = time.time() t4 = time.time()
# Sum all validation confusions # Sum all validation confusions
C_tot = [np.sum(seq_C, axis=0) for seq_C in val_loader.dataset.val_confs if len(seq_C) > 0] C_tot = [
np.sum(seq_C, axis=0)
for seq_C in val_loader.dataset.val_confs
if len(seq_C) > 0
]
C_tot = np.sum(np.stack(C_tot, axis=0), axis=0) C_tot = np.sum(np.stack(C_tot, axis=0), axis=0)
if debug: if debug:
s = '\n' s = "\n"
for cc in C_tot: for cc in C_tot:
for c in cc: for c in cc:
s += '{:8.1f} '.format(c) s += "{:8.1f} ".format(c)
s += '\n' s += "\n"
print(s) print(s)
# Remove ignored labels from confusions # Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))): for l_ind, label_value in reversed(
list(enumerate(val_loader.dataset.label_values))
):
if label_value in val_loader.dataset.ignored_labels: if label_value in val_loader.dataset.ignored_labels:
C_tot = np.delete(C_tot, l_ind, axis=0) C_tot = np.delete(C_tot, l_ind, axis=0)
C_tot = np.delete(C_tot, l_ind, axis=1) C_tot = np.delete(C_tot, l_ind, axis=1)
@ -856,19 +904,17 @@ class ModelTrainer:
# Saving (optional) # Saving (optional)
if config.saving: if config.saving:
IoU_list = [IoUs, val_IoUs] IoU_list = [IoUs, val_IoUs]
file_list = ['subpart_IoUs.txt', 'val_IoUs.txt'] file_list = ["subpart_IoUs.txt", "val_IoUs.txt"]
for IoUs_to_save, IoU_file in zip(IoU_list, file_list): for IoUs_to_save, IoU_file in zip(IoU_list, file_list):
# Name of saving file # Name of saving file
test_file = join(config.saving_path, IoU_file) test_file = join(config.saving_path, IoU_file)
# Line to write: # Line to write:
line = '' line = ""
for IoU in IoUs_to_save: for IoU in IoUs_to_save:
line += '{:.3f} '.format(IoU) line += "{:.3f} ".format(IoU)
line = line + '\n' line = line + "\n"
# Write in file # Write in file
if exists(test_file): if exists(test_file):
@ -880,57 +926,22 @@ class ModelTrainer:
# Print instance mean # Print instance mean
mIoU = 100 * np.mean(IoUs) mIoU = 100 * np.mean(IoUs)
print('{:s} : subpart mIoU = {:.1f} %'.format(config.dataset, mIoU)) print("{:s} : subpart mIoU = {:.1f} %".format(config.dataset, mIoU))
mIoU = 100 * np.mean(val_IoUs) mIoU = 100 * np.mean(val_IoUs)
print('{:s} : val mIoU = {:.1f} %'.format(config.dataset, mIoU)) print("{:s} : val mIoU = {:.1f} %".format(config.dataset, mIoU))
t6 = time.time() t6 = time.time()
# Display timings # Display timings
if debug: if debug:
print('\n************************\n') print("\n************************\n")
print('Validation timings:') print("Validation timings:")
print('Init ...... {:.1f}s'.format(t1 - t0)) print("Init ...... {:.1f}s".format(t1 - t0))
print('Loop ...... {:.1f}s'.format(t2 - t1)) print("Loop ...... {:.1f}s".format(t2 - t1))
print('Confs ..... {:.1f}s'.format(t3 - t2)) print("Confs ..... {:.1f}s".format(t3 - t2))
print('IoU1 ...... {:.1f}s'.format(t4 - t3)) print("IoU1 ...... {:.1f}s".format(t4 - t3))
print('IoU2 ...... {:.1f}s'.format(t5 - t4)) print("IoU2 ...... {:.1f}s".format(t5 - t4))
print('Save ...... {:.1f}s'.format(t6 - t5)) print("Save ...... {:.1f}s".format(t6 - t5))
print('\n************************\n') print("\n************************\n")
return return
@ -26,19 +26,18 @@
import torch import torch
import numpy as np import numpy as np
from sklearn.neighbors import KDTree from sklearn.neighbors import KDTree
from os import makedirs, remove, rename, listdir from os import listdir
from os.path import exists, join from os.path import join
import time import time
from mayavi import mlab from mayavi import mlab
import sys
from models.blocks import KPConv from models.blocks import KPConv
# PLY reader # PLY reader
from utils.ply import write_ply, read_ply from utils.ply import write_ply
# Configuration class # Configuration class
from utils.config import Config, bcolors from utils.config import bcolors
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
@ -49,7 +48,6 @@ from utils.config import Config, bcolors
class ModelVisualizer: class ModelVisualizer:
# Initialization methods # Initialization methods
# ------------------------------------------------------------------------------------------------------------------ # ------------------------------------------------------------------------------------------------------------------
@ -81,13 +79,13 @@ class ModelVisualizer:
checkpoint = torch.load(chkp_path) checkpoint = torch.load(chkp_path)
new_dict = {} new_dict = {}
for k, v in checkpoint['model_state_dict'].items(): for k, v in checkpoint["model_state_dict"].items():
if 'blocs' in k: if "blocs" in k:
k = k.replace('blocs', 'blocks') k = k.replace("blocs", "blocks")
new_dict[k] = v new_dict[k] = v
net.load_state_dict(new_dict) net.load_state_dict(new_dict)
self.epoch = checkpoint['epoch'] self.epoch = checkpoint["epoch"]
net.eval() net.eval()
print("\nModel state restored from {:s}.".format(chkp_path)) print("\nModel state restored from {:s}.".format(chkp_path))
@ -105,8 +103,10 @@ class ModelVisualizer:
# First choose the visualized deformations # First choose the visualized deformations
########################################## ##########################################
print('\nList of the deformable convolution available (chosen one highlighted in green)') print(
fmt_str = ' {:}{:2d} > KPConv(r={:.3f}, Din={:d}, Dout={:d}){:}' "\nList of the deformable convolution available (chosen one highlighted in green)"
)
fmt_str = " {:}{:2d} > KPConv(r={:.3f}, Din={:d}, Dout={:d}){:}"
deform_convs = [] deform_convs = []
for m in net.modules(): for m in net.modules():
if isinstance(m, KPConv) and m.deformable: if isinstance(m, KPConv) and m.deformable:
@ -114,27 +114,34 @@ class ModelVisualizer:
color = bcolors.OKGREEN color = bcolors.OKGREEN
else: else:
color = bcolors.FAIL color = bcolors.FAIL
print(fmt_str.format(color, len(deform_convs), m.radius, m.in_channels, m.out_channels, bcolors.ENDC)) print(
fmt_str.format(
color,
len(deform_convs),
m.radius,
m.in_channels,
m.out_channels,
bcolors.ENDC,
)
)
deform_convs.append(m) deform_convs.append(m)
################ ################
# Initialization # Initialization
################ ################
print('\n****************************************************\n') print("\n****************************************************\n")
# Loop variables # Loop variables
t0 = time.time() time.time()
t = [time.time()] t = [time.time()]
last_display = time.time() time.time()
mean_dt = np.zeros(1) np.zeros(1)
count = 0 count = 0
# Start training loop # Start training loop
for epoch in range(config.max_epoch): for epoch in range(config.max_epoch):
for batch in loader: for batch in loader:
################## ##################
# Processing batch # Processing batch
################## ##################
@ -143,16 +150,20 @@ class ModelVisualizer:
t = t[-1:] t = t[-1:]
t += [time.time()] t += [time.time()]
if 'cuda' in self.device.type: if "cuda" in self.device.type:
batch.to(self.device) batch.to(self.device)
# Forward pass # Forward pass
outputs = net(batch, config) net(batch, config)
original_KP = deform_convs[deform_idx].kernel_points.cpu().detach().numpy() original_KP = (
stacked_deformed_KP = deform_convs[deform_idx].deformed_KP.cpu().detach().numpy() deform_convs[deform_idx].kernel_points.cpu().detach().numpy()
)
stacked_deformed_KP = (
deform_convs[deform_idx].deformed_KP.cpu().detach().numpy()
)
count += batch.lengths[0].shape[0] count += batch.lengths[0].shape[0]
if 'cuda' in self.device.type: if "cuda" in self.device.type:
torch.cuda.synchronize(self.device) torch.cuda.synchronize(self.device)
# Find layer # Find layer
@ -171,17 +182,23 @@ class ModelVisualizer:
lookuptrees = [] lookuptrees = []
i0 = 0 i0 = 0
for b_i, length in enumerate(batch.lengths[0]): for b_i, length in enumerate(batch.lengths[0]):
in_points.append(batch.points[0][i0:i0 + length].cpu().detach().numpy()) in_points.append(
batch.points[0][i0 : i0 + length].cpu().detach().numpy()
)
if batch.features.shape[1] == 4: if batch.features.shape[1] == 4:
in_colors.append(batch.features[i0:i0 + length, 1:].cpu().detach().numpy()) in_colors.append(
batch.features[i0 : i0 + length, 1:].cpu().detach().numpy()
)
else: else:
in_colors.append(None) in_colors.append(None)
i0 += length i0 += length
i0 = 0 i0 = 0
for b_i, length in enumerate(batch.lengths[l]): for b_i, length in enumerate(batch.lengths[l]):
points.append(batch.points[l][i0:i0 + length].cpu().detach().numpy()) points.append(
deformed_KP.append(stacked_deformed_KP[i0:i0 + length]) batch.points[l][i0 : i0 + length].cpu().detach().numpy()
)
deformed_KP.append(stacked_deformed_KP[i0 : i0 + length])
lookuptrees.append(KDTree(points[-1])) lookuptrees.append(KDTree(points[-1]))
i0 += length i0 += length
@ -190,7 +207,9 @@ class ModelVisualizer:
########################### ###########################
# Create figure for features # Create figure for features
fig1 = mlab.figure('Deformations', bgcolor=(1.0, 1.0, 1.0), size=(1280, 920)) fig1 = mlab.figure(
"Deformations", bgcolor=(1.0, 1.0, 1.0), size=(1280, 920)
)
fig1.scene.parallel_projection = False fig1.scene.parallel_projection = False
# Indices # Indices
@ -204,26 +223,41 @@ class ModelVisualizer:
aim_point = np.zeros((1, 3)) aim_point = np.zeros((1, 3))
def picker_callback(picker): def picker_callback(picker):
""" Picker callback: this get called when on pick events. """Picker callback: this get called when on pick events."""
"""
global plots, aim_point global plots, aim_point
if 'in_points' in plots: if "in_points" in plots:
if plots['in_points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]: if plots["in_points"].actor.actor._vtk_obj in [
point_rez = plots['in_points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0] o._vtk_obj for o in picker.actors
]:
point_rez = (
plots["in_points"]
.glyph.glyph_source.glyph_source.output.points.to_array()
.shape[0]
)
new_point_i = int(np.floor(picker.point_id / point_rez)) new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['in_points'].mlab_source.points): if new_point_i < len(plots["in_points"].mlab_source.points):
# Get closest point in the layer we are interested in # Get closest point in the layer we are interested in
aim_point = plots['in_points'].mlab_source.points[new_point_i:new_point_i + 1] aim_point = plots["in_points"].mlab_source.points[
new_point_i : new_point_i + 1
]
update_scene() update_scene()
if 'points' in plots: if "points" in plots:
if plots['points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]: if plots["points"].actor.actor._vtk_obj in [
point_rez = plots['points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0] o._vtk_obj for o in picker.actors
]:
point_rez = (
plots["points"]
.glyph.glyph_source.glyph_source.output.points.to_array()
.shape[0]
)
new_point_i = int(np.floor(picker.point_id / point_rez)) new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['points'].mlab_source.points): if new_point_i < len(plots["points"].mlab_source.points):
# Get closest point in the layer we are interested in # Get closest point in the layer we are interested in
aim_point = plots['points'].mlab_source.points[new_point_i:new_point_i + 1] aim_point = plots["points"].mlab_source.points[
new_point_i : new_point_i + 1
]
update_scene() update_scene()
def update_scene(): def update_scene():
@ -243,61 +277,68 @@ class ModelVisualizer:
p = points[obj_i] p = points[obj_i]
# Rescale points for visu # Rescale points for visu
p = (p * 1.5 / config.in_radius) p = p * 1.5 / config.in_radius
# Show point cloud # Show point cloud
if show_in_p <= 1: if show_in_p <= 1:
plots['points'] = mlab.points3d(p[:, 0], plots["points"] = mlab.points3d(
p[:, 1], p[:, 0],
p[:, 2], p[:, 1],
resolution=8, p[:, 2],
scale_factor=p_scale, resolution=8,
scale_mode='none', scale_factor=p_scale,
color=(0, 1, 1), scale_mode="none",
figure=fig1) color=(0, 1, 1),
figure=fig1,
)
if show_in_p >= 1: if show_in_p >= 1:
# Get points and colors # Get points and colors
in_p = in_points[obj_i] in_p = in_points[obj_i]
in_p = (in_p * 1.5 / config.in_radius) in_p = in_p * 1.5 / config.in_radius
# Color point cloud if possible # Color point cloud if possible
in_c = in_colors[obj_i] in_c = in_colors[obj_i]
if in_c is not None: if in_c is not None:
# Primitives # Primitives
scalars = np.arange(len(in_p)) # Key point: set an integer for each point scalars = np.arange(
len(in_p)
) # Key point: set an integer for each point
# Define color table (including alpha), which must be uint8 and [0,255] # Define color table (including alpha), which must be uint8 and [0,255]
colors = np.hstack((in_c, np.ones_like(in_c[:, :1]))) colors = np.hstack((in_c, np.ones_like(in_c[:, :1])))
colors = (colors * 255).astype(np.uint8) colors = (colors * 255).astype(np.uint8)
plots['in_points'] = mlab.points3d(in_p[:, 0], plots["in_points"] = mlab.points3d(
in_p[:, 1], in_p[:, 0],
in_p[:, 2], in_p[:, 1],
scalars, in_p[:, 2],
resolution=8, scalars,
scale_factor=p_scale*0.8, resolution=8,
scale_mode='none', scale_factor=p_scale * 0.8,
figure=fig1) scale_mode="none",
plots['in_points'].module_manager.scalar_lut_manager.lut.table = colors figure=fig1,
)
plots[
"in_points"
].module_manager.scalar_lut_manager.lut.table = colors
else: else:
plots["in_points"] = mlab.points3d(
plots['in_points'] = mlab.points3d(in_p[:, 0], in_p[:, 0],
in_p[:, 1], in_p[:, 1],
in_p[:, 2], in_p[:, 2],
resolution=8, resolution=8,
scale_factor=p_scale*0.8, scale_factor=p_scale * 0.8,
scale_mode='none', scale_mode="none",
figure=fig1) figure=fig1,
)
# Get KP locations # Get KP locations
rescaled_aim_point = aim_point * config.in_radius / 1.5 rescaled_aim_point = aim_point * config.in_radius / 1.5
point_i = lookuptrees[obj_i].query(rescaled_aim_point, return_distance=False)[0][0] point_i = lookuptrees[obj_i].query(
rescaled_aim_point, return_distance=False
)[0][0]
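The lookup trees built earlier let the visualizer map the picked location back to the closest point of the current layer; with return_distance=False, sklearn's KDTree.query returns only the index array, and [0][0] takes the single nearest neighbour of the single query point. A standalone sketch on random data:

    import numpy as np
    from sklearn.neighbors import KDTree

    rng = np.random.default_rng(0)
    points = rng.random((100, 3))  # stand-in for one layer's point cloud
    tree = KDTree(points)

    aim_point = rng.random((1, 3))  # the clicked location, already rescaled
    # query returns an (n_queries, k) index array when return_distance=False.
    point_i = tree.query(aim_point, return_distance=False)[0][0]
    print(point_i, points[point_i])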
if offsets: if offsets:
KP = points[obj_i][point_i] + deformed_KP[obj_i][point_i] KP = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
scals = np.ones_like(KP[:, 0]) scals = np.ones_like(KP[:, 0])
@ -305,35 +346,46 @@ class ModelVisualizer:
KP = points[obj_i][point_i] + original_KP KP = points[obj_i][point_i] + original_KP
scals = np.zeros_like(KP[:, 0]) scals = np.zeros_like(KP[:, 0])
KP = (KP * 1.5 / config.in_radius) KP = KP * 1.5 / config.in_radius
plots['KP'] = mlab.points3d(KP[:, 0],
KP[:, 1],
KP[:, 2],
scals,
colormap='autumn',
resolution=8,
scale_factor=1.2*p_scale,
scale_mode='none',
vmin=0,
vmax=1,
figure=fig1)
plots["KP"] = mlab.points3d(
KP[:, 0],
KP[:, 1],
KP[:, 2],
scals,
colormap="autumn",
resolution=8,
scale_factor=1.2 * p_scale,
scale_mode="none",
vmin=0,
vmax=1,
figure=fig1,
)
if True: if True:
plots['center'] = mlab.points3d(p[point_i, 0], plots["center"] = mlab.points3d(
p[point_i, 1], p[point_i, 0],
p[point_i, 2], p[point_i, 1],
scale_factor=1.1*p_scale, p[point_i, 2],
scale_mode='none', scale_factor=1.1 * p_scale,
color=(0, 1, 0), scale_mode="none",
figure=fig1) color=(0, 1, 0),
figure=fig1,
)
# New title # New title
plots['title'] = mlab.title(str(obj_i), color=(0, 0, 0), size=0.3, height=0.01) plots["title"] = mlab.title(
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' str(obj_i), color=(0, 0, 0), size=0.3, height=0.01
plots['text'] = mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) )
plots['orient'] = mlab.orientation_axes() text = (
"<--- (press g for previous)"
+ 50 * " "
+ "(press h for next) --->"
)
plots["text"] = mlab.text(
0.01, 0.01, text, color=(0, 0, 0), width=0.98
)
plots["orient"] = mlab.orientation_axes()
# Set the saved view # Set the saved view
mlab.view(*v) mlab.view(*v)
@ -347,12 +399,10 @@ class ModelVisualizer:
# Get KP locations # Get KP locations
KP_def = points[obj_i][point_i] + deformed_KP[obj_i][point_i] KP_def = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
KP_def = (KP_def * 1.5 / config.in_radius) KP_def = KP_def * 1.5 / config.in_radius
KP_def_color = (1, 0, 0)
KP_rigid = points[obj_i][point_i] + original_KP KP_rigid = points[obj_i][point_i] + original_KP
KP_rigid = (KP_rigid * 1.5 / config.in_radius) KP_rigid = KP_rigid * 1.5 / config.in_radius
KP_rigid_color = (1, 0.7, 0)
if offsets: if offsets:
t_list = np.linspace(0, 1, 150, dtype=np.float32) t_list = np.linspace(0, 1, 150, dtype=np.float32)
@ -362,10 +412,12 @@ class ModelVisualizer:
@mlab.animate(delay=10) @mlab.animate(delay=10)
def anim(): def anim():
for t in t_list: for t in t_list:
plots['KP'].mlab_source.set(x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0], plots["KP"].mlab_source.set(
y=t * KP_def[:, 1] + (1 - t) * KP_rigid[:, 1], x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0],
z=t * KP_def[:, 2] + (1 - t) * KP_rigid[:, 2], y=t * KP_def[:, 1] + (1 - t) * KP_rigid[:, 1],
scalars=t * np.ones_like(KP_def[:, 0])) z=t * KP_def[:, 2] + (1 - t) * KP_rigid[:, 2],
scalars=t * np.ones_like(KP_def[:, 0]),
)
yield yield
@ -376,58 +428,63 @@ class ModelVisualizer:
def keyboard_callback(vtk_obj, event): def keyboard_callback(vtk_obj, event):
global obj_i, point_i, offsets, p_scale, show_in_p global obj_i, point_i, offsets, p_scale, show_in_p
if vtk_obj.GetKeyCode() in ['b', 'B']: if vtk_obj.GetKeyCode() in ["b", "B"]:
p_scale /= 1.5 p_scale /= 1.5
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['n', 'N']: elif vtk_obj.GetKeyCode() in ["n", "N"]:
p_scale *= 1.5 p_scale *= 1.5
update_scene() update_scene()
if vtk_obj.GetKeyCode() in ['g', 'G']: if vtk_obj.GetKeyCode() in ["g", "G"]:
obj_i = (obj_i - 1) % len(deformed_KP) obj_i = (obj_i - 1) % len(deformed_KP)
point_i = 0 point_i = 0
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['h', 'H']: elif vtk_obj.GetKeyCode() in ["h", "H"]:
obj_i = (obj_i + 1) % len(deformed_KP) obj_i = (obj_i + 1) % len(deformed_KP)
point_i = 0 point_i = 0
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['k', 'K']: elif vtk_obj.GetKeyCode() in ["k", "K"]:
offsets = not offsets offsets = not offsets
animate_kernel() animate_kernel()
elif vtk_obj.GetKeyCode() in ['z', 'Z']: elif vtk_obj.GetKeyCode() in ["z", "Z"]:
show_in_p = (show_in_p + 1) % 3 show_in_p = (show_in_p + 1) % 3
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['0']: elif vtk_obj.GetKeyCode() in ["0"]:
print("Saving")
print('Saving')
# Find a new name # Find a new name
file_i = 0 file_i = 0
file_name = 'KP_{:03d}.ply'.format(file_i) file_name = "KP_{:03d}.ply".format(file_i)
files = [f for f in listdir('KP_clouds') if f.endswith('.ply')] files = [f for f in listdir("KP_clouds") if f.endswith(".ply")]
while file_name in files: while file_name in files:
file_i += 1 file_i += 1
file_name = 'KP_{:03d}.ply'.format(file_i) file_name = "KP_{:03d}.ply".format(file_i)
KP_deform = points[obj_i][point_i] + deformed_KP[obj_i][point_i] KP_deform = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
KP_normal = points[obj_i][point_i] + original_KP KP_normal = points[obj_i][point_i] + original_KP
# Save # Save
write_ply(join('KP_clouds', file_name), write_ply(
[in_points[obj_i], in_colors[obj_i]], join("KP_clouds", file_name),
['x', 'y', 'z', 'red', 'green', 'blue']) [in_points[obj_i], in_colors[obj_i]],
write_ply(join('KP_clouds', 'KP_{:03d}_deform.ply'.format(file_i)), ["x", "y", "z", "red", "green", "blue"],
[KP_deform], )
['x', 'y', 'z']) write_ply(
write_ply(join('KP_clouds', 'KP_{:03d}_normal.ply'.format(file_i)), join("KP_clouds", "KP_{:03d}_deform.ply".format(file_i)),
[KP_normal], [KP_deform],
['x', 'y', 'z']) ["x", "y", "z"],
print('OK') )
write_ply(
join("KP_clouds", "KP_{:03d}_normal.ply".format(file_i)),
[KP_normal],
["x", "y", "z"],
)
print("OK")
return return
@ -435,7 +492,7 @@ class ModelVisualizer:
pick_func = fig1.on_mouse_pick(picker_callback) pick_func = fig1.on_mouse_pick(picker_callback)
pick_func.tolerance = 0.01 pick_func.tolerance = 0.01
update_scene() update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
mlab.show() mlab.show()
return return
@ -445,13 +502,12 @@ class ModelVisualizer:
def show_ModelNet_models(all_points): def show_ModelNet_models(all_points):
########################### ###########################
# Interactive visualization # Interactive visualization
########################### ###########################
# Create figure for features # Create figure for features
fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800)) fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800))
fig1.scene.parallel_projection = False fig1.scene.parallel_projection = False
# Indices # Indices
@ -459,7 +515,6 @@ def show_ModelNet_models(all_points):
file_i = 0 file_i = 0
def update_scene(): def update_scene():
# clear figure # clear figure
mlab.clf(fig1) mlab.clf(fig1)
@ -470,17 +525,19 @@ def show_ModelNet_models(all_points):
points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
# Show point clouds colorized with activations # Show point clouds colorized with activations
activations = mlab.points3d(points[:, 0], mlab.points3d(
points[:, 1], points[:, 0],
points[:, 2], points[:, 1],
points[:, 2], points[:, 2],
scale_factor=3.0, points[:, 2],
scale_mode='none', scale_factor=3.0,
figure=fig1) scale_mode="none",
figure=fig1,
)
# New title # New title
mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01) mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->"
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
mlab.orientation_axes() mlab.orientation_axes()
@ -489,13 +546,11 @@ def show_ModelNet_models(all_points):
def keyboard_callback(vtk_obj, event): def keyboard_callback(vtk_obj, event):
global file_i global file_i
if vtk_obj.GetKeyCode() in ['g', 'G']: if vtk_obj.GetKeyCode() in ["g", "G"]:
file_i = (file_i - 1) % len(all_points) file_i = (file_i - 1) % len(all_points)
update_scene() update_scene()
elif vtk_obj.GetKeyCode() in ['h', 'H']: elif vtk_obj.GetKeyCode() in ["h", "H"]:
file_i = (file_i + 1) % len(all_points) file_i = (file_i + 1) % len(all_points)
update_scene() update_scene()
@ -503,29 +558,5 @@ def show_ModelNet_models(all_points):
# Draw a first plot # Draw a first plot
update_scene() update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
mlab.show() mlab.show()
@ -22,11 +22,8 @@
# #
# Common libs # Common libs
import signal
import os import os
import numpy as np import numpy as np
import sys
import torch
# Dataset # Dataset
from datasetss.ModelNet40 import * from datasetss.ModelNet40 import *
@ -44,20 +41,25 @@ from models.architectures import KPCNN, KPFCNN
# \***************/ # \***************/
# #
def model_choice(chosen_log):
def model_choice(chosen_log):
########################### ###########################
# Call the test initializer # Call the test initializer
########################### ###########################
# Automatically retrieve the last trained model # Automatically retrieve the last trained model
if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']: if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]:
# Dataset name # Dataset name
test_dataset = '_'.join(chosen_log.split('_')[1:]) test_dataset = "_".join(chosen_log.split("_")[1:])
# List all training logs # List all training logs
logs = np.sort([os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log')]) logs = np.sort(
[
os.path.join("results", f)
for f in os.listdir("results")
if f.startswith("Log")
]
)
# Find the last log of asked dataset # Find the last log of asked dataset
for log in logs[::-1]: for log in logs[::-1]:
@ -67,12 +69,12 @@ def model_choice(chosen_log):
chosen_log = log chosen_log = log
break break
if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']: if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]:
raise ValueError('No log of the dataset "' + test_dataset + '" found') raise ValueError('No log of the dataset "' + test_dataset + '" found')
# Check if log exists # Check if log exists
if not os.path.exists(chosen_log): if not os.path.exists(chosen_log):
raise ValueError('The given log does not exists: ' + chosen_log) raise ValueError("The given log does not exists: " + chosen_log)
return chosen_log return chosen_log
@ -83,8 +85,7 @@ def model_choice(chosen_log):
# \***************/ # \***************/
# #
if __name__ == '__main__': if __name__ == "__main__":
############################### ###############################
# Choose the model to visualize # Choose the model to visualize
############################### ###############################
@ -94,7 +95,7 @@ if __name__ == '__main__':
# > 'last_XXX': Automatically retrieve the last trained model on dataset XXX # > 'last_XXX': Automatically retrieve the last trained model on dataset XXX
# > 'results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model # > 'results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model
chosen_log = 'results/Log_2020-04-23_19-42-18' chosen_log = "results/Log_2020-04-23_19-42-18"
# Choose the index of the checkpoint to load OR None if you want to load the current checkpoint # Choose the index of the checkpoint to load OR None if you want to load the current checkpoint
chkp_idx = None chkp_idx = None
@ -110,25 +111,25 @@ if __name__ == '__main__':
############################ ############################
# Set which gpu is going to be used # Set which gpu is going to be used
GPU_ID = '0' GPU_ID = "0"
# Set GPU visible device # Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
############### ###############
# Previous chkp # Previous chkp
############### ###############
# Find all checkpoints in the chosen training folder # Find all checkpoints in the chosen training folder
chkp_path = os.path.join(chosen_log, 'checkpoints') chkp_path = os.path.join(chosen_log, "checkpoints")
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
# Find which snapshot to restore # Find which snapshot to restore
if chkp_idx is None: if chkp_idx is None:
chosen_chkp = 'current_chkp.tar' chosen_chkp = "current_chkp.tar"
else: else:
chosen_chkp = np.sort(chkps)[chkp_idx] chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join(chosen_log, 'checkpoints', chosen_chkp) chosen_chkp = os.path.join(chosen_log, "checkpoints", chosen_chkp)
# Initialize configuration class # Initialize configuration class
config = Config() config = Config()
@ -150,53 +151,54 @@ if __name__ == '__main__':
############## ##############
print() print()
print('Data Preparation') print("Data Preparation")
print('****************') print("****************")
# Initiate dataset # Initiate dataset
if config.dataset.startswith('ModelNet40'): if config.dataset.startswith("ModelNet40"):
test_dataset = ModelNet40Dataset(config, train=False) test_dataset = ModelNet40Dataset(config, train=False)
test_sampler = ModelNet40Sampler(test_dataset) test_sampler = ModelNet40Sampler(test_dataset)
collate_fn = ModelNet40Collate collate_fn = ModelNet40Collate
elif config.dataset == 'S3DIS': elif config.dataset == "S3DIS":
test_dataset = S3DISDataset(config, set='validation', use_potentials=True) test_dataset = S3DISDataset(config, set="validation", use_potentials=True)
test_sampler = S3DISSampler(test_dataset) test_sampler = S3DISSampler(test_dataset)
collate_fn = S3DISCollate collate_fn = S3DISCollate
else: else:
raise ValueError('Unsupported dataset : ' + config.dataset) raise ValueError("Unsupported dataset : " + config.dataset)
# Data loader # Data loader
test_loader = DataLoader(test_dataset, test_loader = DataLoader(
batch_size=1, test_dataset,
sampler=test_sampler, batch_size=1,
collate_fn=collate_fn, sampler=test_sampler,
num_workers=config.input_threads, collate_fn=collate_fn,
pin_memory=True) num_workers=config.input_threads,
pin_memory=True,
)
# Calibrate samplers # Calibrate samplers
test_sampler.calibration(test_loader, verbose=True) test_sampler.calibration(test_loader, verbose=True)
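The loader above combines a custom Sampler, which decides which elements are drawn and in what order, with a custom collate_fn, which turns the drawn elements into one batch object. A toy version of that wiring, independent of the KPConv batch classes (dataset, sampler and collate function are all illustrative):

    import torch
    from torch.utils.data import DataLoader, Dataset, Sampler

    class ToyDataset(Dataset):
        def __init__(self, n=10):
            self.data = [torch.full((3,), float(i)) for i in range(n)]

        def __len__(self):
            return len(self.data)

        def __getitem__(self, i):
            return self.data[i]

    class EveryOtherSampler(Sampler):
        """Yields every other index, standing in for the calibrated samplers."""

        def __init__(self, dataset):
            self.indices = list(range(0, len(dataset), 2))

        def __iter__(self):
            return iter(self.indices)

        def __len__(self):
            return len(self.indices)

    def collate_fn(batch_list):
        # Stack the drawn elements into a single tensor batch.
        return torch.stack(batch_list, dim=0)

    dataset = ToyDataset()
    loader = DataLoader(
        dataset,
        batch_size=2,
        sampler=EveryOtherSampler(dataset),
        collate_fn=collate_fn,
        num_workers=0,
        pin_memory=False,
    )
    for batch in loader:
        print(batch.shape)  # torch.Size([2, 3]); the last batch may be smaller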
print('\nModel Preparation') print("\nModel Preparation")
print('*****************') print("*****************")
# Define network model # Define network model
t1 = time.time() t1 = time.time()
if config.dataset_task == 'classification': if config.dataset_task == "classification":
net = KPCNN(config) net = KPCNN(config)
elif config.dataset_task in ['cloud_segmentation', 'slam_segmentation']: elif config.dataset_task in ["cloud_segmentation", "slam_segmentation"]:
net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels) net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels)
else: else:
raise ValueError('Unsupported dataset_task for deformation visu: ' + config.dataset_task) raise ValueError(
"Unsupported dataset_task for deformation visu: " + config.dataset_task
)
# Define a visualizer class # Define a visualizer class
visualizer = ModelVisualizer(net, config, chkp_path=chosen_chkp, on_gpu=False) visualizer = ModelVisualizer(net, config, chkp_path=chosen_chkp, on_gpu=False)
print('Done in {:.1f}s\n'.format(time.time() - t1)) print("Done in {:.1f}s\n".format(time.time() - t1))
print('\nStart visualization') print("\nStart visualization")
print('*******************') print("*******************")
# Training # Training
visualizer.show_deformable_kernels(net, test_loader, config, deform_idx) visualizer.show_deformable_kernels(net, test_loader, config, deform_idx)