diff --git a/cpp_wrappers/cpp_neighbors/setup.py b/cpp_wrappers/cpp_neighbors/setup.py index 8f53a9c..0006a89 100644 --- a/cpp_wrappers/cpp_neighbors/setup.py +++ b/cpp_wrappers/cpp_neighbors/setup.py @@ -7,22 +7,20 @@ import numpy.distutils.misc_util # Adding sources of the project # ***************************** -SOURCES = ["../cpp_utils/cloud/cloud.cpp", - "neighbors/neighbors.cpp", - "wrapper.cpp"] - -module = Extension(name="radius_neighbors", - sources=SOURCES, - extra_compile_args=['-std=c++11', - '-D_GLIBCXX_USE_CXX11_ABI=0']) - - -setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs()) - - - - - +SOURCES = [ + "../cpp_utils/cloud/cloud.cpp", + "neighbors/neighbors.cpp", + "wrapper.cpp", +] +module = Extension( + name="radius_neighbors", + sources=SOURCES, + extra_compile_args=["-std=c++11", "-D_GLIBCXX_USE_CXX11_ABI=0"], +) +setup( + ext_modules=[module], + include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs(), +) diff --git a/cpp_wrappers/cpp_subsampling/setup.py b/cpp_wrappers/cpp_subsampling/setup.py index 3206299..170ed62 100644 --- a/cpp_wrappers/cpp_subsampling/setup.py +++ b/cpp_wrappers/cpp_subsampling/setup.py @@ -7,22 +7,20 @@ import numpy.distutils.misc_util # Adding sources of the project # ***************************** -SOURCES = ["../cpp_utils/cloud/cloud.cpp", - "grid_subsampling/grid_subsampling.cpp", - "wrapper.cpp"] - -module = Extension(name="grid_subsampling", - sources=SOURCES, - extra_compile_args=['-std=c++11', - '-D_GLIBCXX_USE_CXX11_ABI=0']) - - -setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs()) - - - - - +SOURCES = [ + "../cpp_utils/cloud/cloud.cpp", + "grid_subsampling/grid_subsampling.cpp", + "wrapper.cpp", +] +module = Extension( + name="grid_subsampling", + sources=SOURCES, + extra_compile_args=["-std=c++11", "-D_GLIBCXX_USE_CXX11_ABI=0"], +) +setup( + ext_modules=[module], + include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs(), +) diff --git a/datasetss/ModelNet40.py b/datasetss/ModelNet40.py index 1caad81..be399f5 100644 --- a/datasetss/ModelNet40.py +++ b/datasetss/ModelNet40.py @@ -27,11 +27,9 @@ import time import numpy as np import pickle import torch -import math # OS functions -from os import listdir from os.path import exists, join # Dataset parent class @@ -55,53 +53,55 @@ class ModelNet40Dataset(PointCloudDataset): """ This dataset is small enough to be stored in-memory, so load all point clouds here """ - PointCloudDataset.__init__(self, 'ModelNet40') + PointCloudDataset.__init__(self, "ModelNet40") ############ # Parameters ############ # Dict from labels to names - self.label_to_names = {0: 'airplane', - 1: 'bathtub', - 2: 'bed', - 3: 'bench', - 4: 'bookshelf', - 5: 'bottle', - 6: 'bowl', - 7: 'car', - 8: 'chair', - 9: 'cone', - 10: 'cup', - 11: 'curtain', - 12: 'desk', - 13: 'door', - 14: 'dresser', - 15: 'flower_pot', - 16: 'glass_box', - 17: 'guitar', - 18: 'keyboard', - 19: 'lamp', - 20: 'laptop', - 21: 'mantel', - 22: 'monitor', - 23: 'night_stand', - 24: 'person', - 25: 'piano', - 26: 'plant', - 27: 'radio', - 28: 'range_hood', - 29: 'sink', - 30: 'sofa', - 31: 'stairs', - 32: 'stool', - 33: 'table', - 34: 'tent', - 35: 'toilet', - 36: 'tv_stand', - 37: 'vase', - 38: 'wardrobe', - 39: 'xbox'} + self.label_to_names = { + 0: "airplane", + 1: "bathtub", + 2: "bed", + 3: "bench", + 4: "bookshelf", + 5: "bottle", + 6: "bowl", + 7: "car", + 8: "chair", + 9: "cone", + 10: "cup", + 11: "curtain", + 12: "desk", + 13: "door", + 14: "dresser", 
+ 15: "flower_pot", + 16: "glass_box", + 17: "guitar", + 18: "keyboard", + 19: "lamp", + 20: "laptop", + 21: "mantel", + 22: "monitor", + 23: "night_stand", + 24: "person", + 25: "piano", + 26: "plant", + 27: "radio", + 28: "range_hood", + 29: "sink", + 30: "sofa", + 31: "stairs", + 32: "stool", + 33: "table", + 34: "tent", + 35: "toilet", + 36: "tv_stand", + 37: "vase", + 38: "wardrobe", + 39: "xbox", + } # Initialize a bunch of variables concerning class labels self.init_labels() @@ -110,10 +110,10 @@ class ModelNet40Dataset(PointCloudDataset): self.ignored_labels = np.array([]) # Dataset folder - self.path = './Data/ModelNet40' + self.path = "./Data/ModelNet40" # Type of task conducted on this dataset - self.dataset_task = 'classification' + self.dataset_task = "classification" # Update number of class and data task in configuration config.num_classes = self.num_classes @@ -128,22 +128,31 @@ class ModelNet40Dataset(PointCloudDataset): # Number of models and models used per epoch if self.train: self.num_models = 9843 - if config.epoch_steps and config.epoch_steps * config.batch_num < self.num_models: + if ( + config.epoch_steps + and config.epoch_steps * config.batch_num < self.num_models + ): self.epoch_n = config.epoch_steps * config.batch_num else: self.epoch_n = self.num_models else: self.num_models = 2468 - self.epoch_n = min(self.num_models, config.validation_size * config.batch_num) + self.epoch_n = min( + self.num_models, config.validation_size * config.batch_num + ) ############# # Load models ############# if 0 < self.config.first_subsampling_dl <= 0.01: - raise ValueError('subsampling_parameter too low (should be over 1 cm') + raise ValueError("subsampling_parameter too low (should be over 1 cm") - self.input_points, self.input_normals, self.input_labels = self.load_subsampled_clouds(orient_correction) + ( + self.input_points, + self.input_normals, + self.input_labels, + ) = self.load_subsampled_clouds(orient_correction) return @@ -171,7 +180,6 @@ class ModelNet40Dataset(PointCloudDataset): R_list = [] for p_i in idx_list: - # Get points and labels points = self.input_points[p_i].astype(np.float32) normals = self.input_normals[p_i].astype(np.float32) @@ -192,7 +200,7 @@ class ModelNet40Dataset(PointCloudDataset): # Concatenate batch ################### - #show_ModelNet_examples(tp_list, cloud_normals=tn_list) + # show_ModelNet_examples(tp_list, cloud_normals=tn_list) stacked_points = np.concatenate(tp_list, axis=0) stacked_normals = np.concatenate(tn_list, axis=0) @@ -209,7 +217,9 @@ class ModelNet40Dataset(PointCloudDataset): elif self.config.in_features_dim == 4: stacked_features = np.hstack((stacked_features, stacked_normals)) else: - raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)') + raise ValueError( + "Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)" + ) ####################### # Create network inputs @@ -219,10 +229,9 @@ class ModelNet40Dataset(PointCloudDataset): # # Get the whole input list - input_list = self.classification_inputs(stacked_points, - stacked_features, - labels, - stack_lengths) + input_list = self.classification_inputs( + stacked_points, stacked_features, labels, stack_lengths + ) # Add scale and rotation for testing input_list += [scales, rots, model_inds] @@ -230,31 +239,38 @@ class ModelNet40Dataset(PointCloudDataset): return input_list def load_subsampled_clouds(self, orient_correction): - # Restart timer t0 = time.time() # Load wanted points if possible if self.train: - split ='training' 
+ split = "training" else: - split = 'test' + split = "test" - print('\nLoading {:s} points subsampled at {:.3f}'.format(split, self.config.first_subsampling_dl)) - filename = join(self.path, '{:s}_{:.3f}_record.pkl'.format(split, self.config.first_subsampling_dl)) + print( + "\nLoading {:s} points subsampled at {:.3f}".format( + split, self.config.first_subsampling_dl + ) + ) + filename = join( + self.path, + "{:s}_{:.3f}_record.pkl".format(split, self.config.first_subsampling_dl), + ) if exists(filename): - with open(filename, 'rb') as file: + with open(filename, "rb") as file: input_points, input_normals, input_labels = pickle.load(file) # Else compute them from original points else: - # Collect training file names if self.train: - names = np.loadtxt(join(self.path, 'modelnet40_train.txt'), dtype=np.str) + names = np.loadtxt( + join(self.path, "modelnet40_train.txt"), dtype=np.str + ) else: - names = np.loadtxt(join(self.path, 'modelnet40_test.txt'), dtype=np.str) + names = np.loadtxt(join(self.path, "modelnet40_test.txt"), dtype=np.str) # Initialize containers input_points = [] @@ -263,49 +279,54 @@ class ModelNet40Dataset(PointCloudDataset): # Advanced display N = len(names) progress_n = 30 - fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%' + fmt_str = "[{:<" + str(progress_n) + "}] {:5.1f}%" # Collect point clouds for i, cloud_name in enumerate(names): - # Read points - class_folder = '_'.join(cloud_name.split('_')[:-1]) - txt_file = join(self.path, class_folder, cloud_name) + '.txt' - data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32) + class_folder = "_".join(cloud_name.split("_")[:-1]) + txt_file = join(self.path, class_folder, cloud_name) + ".txt" + data = np.loadtxt(txt_file, delimiter=",", dtype=np.float32) # Subsample them if self.config.first_subsampling_dl > 0: - points, normals = grid_subsampling(data[:, :3], - features=data[:, 3:], - sampleDl=self.config.first_subsampling_dl) + points, normals = grid_subsampling( + data[:, :3], + features=data[:, 3:], + sampleDl=self.config.first_subsampling_dl, + ) else: points = data[:, :3] normals = data[:, 3:] - print('', end='\r') - print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True) + print("", end="\r") + print( + fmt_str.format("#" * ((i * progress_n) // N), 100 * i / N), + end="", + flush=True, + ) # Add to list input_points += [points] input_normals += [normals] - print('', end='\r') - print(fmt_str.format('#' * progress_n, 100), end='', flush=True) + print("", end="\r") + print(fmt_str.format("#" * progress_n, 100), end="", flush=True) print() # Get labels - label_names = ['_'.join(name.split('_')[:-1]) for name in names] + label_names = ["_".join(name.split("_")[:-1]) for name in names] input_labels = np.array([self.name_to_label[name] for name in label_names]) # Save for later use - with open(filename, 'wb') as file: - pickle.dump((input_points, - input_normals, - input_labels), file) + with open(filename, "wb") as file: + pickle.dump((input_points, input_normals, input_labels), file) lengths = [p.shape[0] for p in input_points] sizes = [l * 4 * 6 for l in lengths] - print('{:.1f} MB loaded in {:.1f}s'.format(np.sum(sizes) * 1e-6, time.time() - t0)) + print( + "{:.1f} MB loaded in {:.1f}s".format(np.sum(sizes) * 1e-6, time.time() - t0) + ) if orient_correction: input_points = [pp[:, [0, 2, 1]] for pp in input_points] @@ -313,6 +334,7 @@ class ModelNet40Dataset(PointCloudDataset): return input_points, input_normals, input_labels + # 
---------------------------------------------------------------------------------------------------------------------- # # Utility classes definition @@ -322,7 +344,9 @@ class ModelNet40Dataset(PointCloudDataset): class ModelNet40Sampler(Sampler): """Sampler for ModelNet40""" - def __init__(self, dataset: ModelNet40Dataset, use_potential=True, balance_labels=False): + def __init__( + self, dataset: ModelNet40Dataset, use_potential=True, balance_labels=False + ): Sampler.__init__(self, dataset) # Does the sampler use potential for regular sampling @@ -356,18 +380,18 @@ class ModelNet40Sampler(Sampler): if self.use_potential: if self.balance_labels: - gen_indices = [] pick_n = self.dataset.epoch_n // self.dataset.num_classes + 1 for i, l in enumerate(self.dataset.label_values): - # Get the potentials of the objects of this class label_inds = np.where(np.equal(self.dataset.input_labels, l))[0] class_potentials = self.potentials[label_inds] # Get the indices to generate thanks to potentials if pick_n < class_potentials.shape[0]: - pick_indices = np.argpartition(class_potentials, pick_n)[:pick_n] + pick_indices = np.argpartition(class_potentials, pick_n)[ + :pick_n + ] else: pick_indices = np.random.permutation(class_potentials.shape[0]) class_indices = label_inds[pick_indices] @@ -377,17 +401,20 @@ class ModelNet40Sampler(Sampler): gen_indices = np.random.permutation(np.hstack(gen_indices)) else: - # Get indices with the minimum potential if self.dataset.epoch_n < self.potentials.shape[0]: - gen_indices = np.argpartition(self.potentials, self.dataset.epoch_n)[:self.dataset.epoch_n] + gen_indices = np.argpartition( + self.potentials, self.dataset.epoch_n + )[: self.dataset.epoch_n] else: gen_indices = np.random.permutation(self.potentials.shape[0]) gen_indices = np.random.permutation(gen_indices) # Update potentials (Change the order for the next epoch) self.potentials[gen_indices] = np.ceil(self.potentials[gen_indices]) - self.potentials[gen_indices] += np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1 + self.potentials[gen_indices] += ( + np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1 + ) else: if self.balance_labels: @@ -399,7 +426,9 @@ class ModelNet40Sampler(Sampler): gen_indices += [rand_inds] gen_indices = np.random.permutation(np.hstack(gen_indices)) else: - gen_indices = np.random.permutation(self.dataset.num_models)[:self.dataset.epoch_n] + gen_indices = np.random.permutation(self.dataset.num_models)[ + : self.dataset.epoch_n + ] ################ # Generator loop @@ -411,7 +440,6 @@ class ModelNet40Sampler(Sampler): # Generator loop for p_i in gen_indices: - # Size of picked cloud n = self.dataset.input_points[p_i].shape[0] @@ -450,7 +478,7 @@ class ModelNet40Sampler(Sampler): # Previously saved calibration ############################## - print('\nStarting Calibration (use verbose=True for more details)') + print("\nStarting Calibration (use verbose=True for more details)") t0 = time.time() redo = False @@ -459,39 +487,40 @@ class ModelNet40Sampler(Sampler): # *********** # Load batch_limit dictionary - batch_lim_file = join(self.dataset.path, 'batch_limits.pkl') + batch_lim_file = join(self.dataset.path, "batch_limits.pkl") if exists(batch_lim_file): - with open(batch_lim_file, 'rb') as file: + with open(batch_lim_file, "rb") as file: batch_lim_dict = pickle.load(file) else: batch_lim_dict = {} # Check if the batch limit associated with current parameters exists - key = '{:.3f}_{:d}'.format(self.dataset.config.first_subsampling_dl, - self.dataset.config.batch_num) + key = 
"{:.3f}_{:d}".format( + self.dataset.config.first_subsampling_dl, self.dataset.config.batch_num + ) if key in batch_lim_dict: self.batch_limit = batch_lim_dict[key] else: redo = True if verbose: - print('\nPrevious calibration found:') - print('Check batch limit dictionary') + print("\nPrevious calibration found:") + print("Check batch limit dictionary") if key in batch_lim_dict: color = bcolors.OKGREEN v = str(int(batch_lim_dict[key])) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" + print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) # Neighbors limit # *************** # Load neighb_limits dictionary - neighb_lim_file = join(self.dataset.path, 'neighbors_limits.pkl') + neighb_lim_file = join(self.dataset.path, "neighbors_limits.pkl") if exists(neighb_lim_file): - with open(neighb_lim_file, 'rb') as file: + with open(neighb_lim_file, "rb") as file: neighb_lim_dict = pickle.load(file) else: neighb_lim_dict = {} @@ -499,14 +528,13 @@ class ModelNet40Sampler(Sampler): # Check if the limit associated with current parameters exists (for each layer) neighb_limits = [] for layer_ind in range(self.dataset.config.num_layers): - dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) if key in neighb_lim_dict: neighb_limits += [neighb_lim_dict[key]] @@ -516,34 +544,37 @@ class ModelNet40Sampler(Sampler): redo = True if verbose: - print('Check neighbors limit dictionary') + print("Check neighbors limit dictionary") for layer_ind in range(self.dataset.config.num_layers): dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) if key in neighb_lim_dict: color = bcolors.OKGREEN v = str(neighb_lim_dict[key]) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" 
+ print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) if redo: - ############################ # Neighbors calib parameters ############################ # From config parameter, compute higher bound of neighbors number in a neighborhood - hist_n = int(np.ceil(4 / 3 * np.pi * (self.dataset.config.conv_radius + 1) ** 3)) + hist_n = int( + np.ceil(4 / 3 * np.pi * (self.dataset.config.conv_radius + 1) ** 3) + ) # Histogram of neighborhood sizes - neighb_hists = np.zeros((self.dataset.config.num_layers, hist_n), dtype=np.int32) + neighb_hists = np.zeros( + (self.dataset.config.num_layers, hist_n), dtype=np.int32 + ) ######################## # Batch calib parameters @@ -573,9 +604,11 @@ class ModelNet40Sampler(Sampler): for epoch in range(10): for batch_i, batch in enumerate(dataloader): - # Update neighborhood histogram - counts = [np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) for neighb_mat in batch.neighbors] + counts = [ + np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) + for neighb_mat in batch.neighbors + ] hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts] neighb_hists += np.vstack(hists) @@ -612,69 +645,68 @@ class ModelNet40Sampler(Sampler): # Console display (only one per second) if verbose and (t - last_display) > 1.0: last_display = t - message = 'Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}' - print(message.format(i, - estim_b, - int(self.batch_limit))) + message = "Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}" + print(message.format(i, estim_b, int(self.batch_limit))) if breaking: break # Use collected neighbor histogram to get neighbors limit cumsum = np.cumsum(neighb_hists.T, axis=0) - percentiles = np.sum(cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0) + percentiles = np.sum( + cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0 + ) self.dataset.neighborhood_limits = percentiles if verbose: - # Crop histogram while np.sum(neighb_hists[:, -1]) == 0: neighb_hists = neighb_hists[:, :-1] hist_n = neighb_hists.shape[1] - print('\n**************************************************\n') - line0 = 'neighbors_num ' + print("\n**************************************************\n") + line0 = "neighbors_num " for layer in range(neighb_hists.shape[0]): - line0 += '| layer {:2d} '.format(layer) + line0 += "| layer {:2d} ".format(layer) print(line0) for neighb_size in range(hist_n): - line0 = ' {:4d} '.format(neighb_size) + line0 = " {:4d} ".format(neighb_size) for layer in range(neighb_hists.shape[0]): if neighb_size > percentiles[layer]: color = bcolors.FAIL else: color = bcolors.OKGREEN - line0 += '|{:}{:10d}{:} '.format(color, - neighb_hists[layer, neighb_size], - bcolors.ENDC) + line0 += "|{:}{:10d}{:} ".format( + color, neighb_hists[layer, neighb_size], bcolors.ENDC + ) print(line0) - print('\n**************************************************\n') - print('\nchosen neighbors limits: ', percentiles) + print("\n**************************************************\n") + print("\nchosen neighbors limits: ", percentiles) print() # Save batch_limit dictionary - key = '{:.3f}_{:d}'.format(self.dataset.config.first_subsampling_dl, - self.dataset.config.batch_num) + key = "{:.3f}_{:d}".format( + self.dataset.config.first_subsampling_dl, self.dataset.config.batch_num + ) batch_lim_dict[key] = self.batch_limit - with open(batch_lim_file, 'wb') as file: + with open(batch_lim_file, "wb") as file: pickle.dump(batch_lim_dict, file) # Save neighb_limit dictionary for layer_ind in range(self.dataset.config.num_layers): - dl = 
self.dataset.config.first_subsampling_dl * (2 ** layer_ind) + dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) neighb_lim_dict[key] = self.dataset.neighborhood_limits[layer_ind] - with open(neighb_lim_file, 'wb') as file: + with open(neighb_lim_file, "wb") as file: pickle.dump(neighb_lim_dict, file) - - print('Calibration done in {:.1f}s\n'.format(time.time() - t0)) + print("Calibration done in {:.1f}s\n".format(time.time() - t0)) return @@ -682,7 +714,6 @@ class ModelNet40CustomBatch: """Custom batch definition with memory pinning for ModelNet40""" def __init__(self, input_list): - # Get rid of batch dimension input_list = input_list[0] @@ -691,13 +722,21 @@ class ModelNet40CustomBatch: # Extract input tensors from the list of numpy array ind = 0 - self.points = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.points = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.neighbors = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.neighbors = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.pools = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.pools = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.lengths = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.lengths = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L self.features = torch.from_numpy(input_list[ind]) ind += 1 @@ -729,7 +768,6 @@ class ModelNet40CustomBatch: return self def to(self, device): - self.points = [in_tensor.to(device) for in_tensor in self.points] self.neighbors = [in_tensor.to(device) for in_tensor in self.neighbors] self.pools = [in_tensor.to(device) for in_tensor in self.pools] @@ -744,15 +782,15 @@ class ModelNet40CustomBatch: def unstack_points(self, layer=None): """Unstack the points""" - return self.unstack_elements('points', layer) + return self.unstack_elements("points", layer) def unstack_neighbors(self, layer=None): """Unstack the neighbors indices""" - return self.unstack_elements('neighbors', layer) + return self.unstack_elements("neighbors", layer) def unstack_pools(self, layer=None): """Unstack the pooling indices""" - return self.unstack_elements('pools', layer) + return self.unstack_elements("pools", layer) def unstack_elements(self, element_name, layer=None, to_numpy=True): """ @@ -760,34 +798,31 @@ class ModelNet40CustomBatch: layers """ - if element_name == 'points': + if element_name == "points": elements = self.points - elif element_name == 'neighbors': + elif element_name == "neighbors": elements = self.neighbors - elif element_name == 'pools': + elif element_name == "pools": elements = self.pools[:-1] else: - raise ValueError('Unknown element name: {:s}'.format(element_name)) + raise ValueError("Unknown element name: {:s}".format(element_name)) all_p_list = [] for layer_i, layer_elems in enumerate(elements): - if layer is None or layer == layer_i: - i0 = 0 p_list = [] - if element_name == 'pools': - lengths = self.lengths[layer_i+1] + if element_name == "pools": + lengths = self.lengths[layer_i + 1] else: lengths = self.lengths[layer_i] for b_i, length in enumerate(lengths): - - elem = layer_elems[i0:i0 + length] - if 
element_name == 'neighbors': + elem = layer_elems[i0 : i0 + length] + if element_name == "neighbors": elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= 0] -= i0 - elif element_name == 'pools': + elif element_name == "pools": elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= 0] -= torch.sum(self.lengths[layer_i][:b_i]) i0 += length @@ -819,16 +854,15 @@ def debug_sampling(dataset, sampler, loader): """Shows which labels are sampled according to strategy chosen""" label_sum = np.zeros((dataset.num_classes), dtype=np.int32) for epoch in range(10): - for batch_i, (points, normals, labels, indices, in_sizes) in enumerate(loader): # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) label_sum += np.bincount(labels.numpy(), minlength=dataset.num_classes) print(label_sum) - #print(sampler.potentials[:6]) + # print(sampler.potentials[:6]) - print('******************') - print('*******************************************') + print("******************") + print("*******************************************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -843,7 +877,6 @@ def debug_timing(dataset, sampler, loader): estim_b = dataset.config.batch_num for epoch in range(10): - for batch_i, batch in enumerate(loader): # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) @@ -864,56 +897,49 @@ def debug_timing(dataset, sampler, loader): # Console display (only one per second) if (t[-1] - last_display) > -1.0: last_display = t[-1] - message = 'Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f}' - print(message.format(batch_i, - 1000 * mean_dt[0], - 1000 * mean_dt[1], - estim_b)) + message = "Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f}" + print( + message.format( + batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1], estim_b + ) + ) - print('************* Epoch ended *************') + print("************* Epoch ended *************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) def debug_show_clouds(dataset, sampler, loader): - - for epoch in range(10): - - clouds = [] - cloud_normals = [] - cloud_labels = [] - L = dataset.config.num_layers for batch_i, batch in enumerate(loader): - # Print characteristics of input tensors - print('\nPoints tensors') + print("\nPoints tensors") for i in range(L): print(batch.points[i].dtype, batch.points[i].shape) - print('\nNeigbors tensors') + print("\nNeigbors tensors") for i in range(L): print(batch.neighbors[i].dtype, batch.neighbors[i].shape) - print('\nPools tensors') + print("\nPools tensors") for i in range(L): print(batch.pools[i].dtype, batch.pools[i].shape) - print('\nStack lengths') + print("\nStack lengths") for i in range(L): print(batch.lengths[i].dtype, batch.lengths[i].shape) - print('\nFeatures') + print("\nFeatures") print(batch.features.dtype, batch.features.shape) - print('\nLabels') + print("\nLabels") print(batch.labels.dtype, batch.labels.shape) - print('\nAugment Scales') + print("\nAugment Scales") print(batch.scales.dtype, batch.scales.shape) - print('\nAugment Rotations') + print("\nAugment Rotations") print(batch.rots.dtype, batch.rots.shape) - print('\nModel indices') + print("\nModel indices") print(batch.model_inds.dtype, batch.model_inds.shape) - print('\nAre input tensors pinned') + print("\nAre input tensors pinned") print(batch.neighbors[0].is_pinned()) print(batch.neighbors[-1].is_pinned()) print(batch.points[0].is_pinned()) @@ -925,7 +951,7 @@ def 
debug_show_clouds(dataset, sampler, loader): show_input_batch(batch) - print('*******************************************') + print("*******************************************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -939,7 +965,6 @@ def debug_batch_and_neighbors_calib(dataset, sampler, loader): mean_dt = np.zeros(2) for epoch in range(10): - for batch_i, input_list in enumerate(loader): # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) @@ -957,12 +982,10 @@ def debug_batch_and_neighbors_calib(dataset, sampler, loader): # Console display (only one per second) if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} ' - print(message.format(batch_i, - 1000 * mean_dt[0], - 1000 * mean_dt[1])) + message = "Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} " + print(message.format(batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1])) - print('************* Epoch ended *************') + print("************* Epoch ended *************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -976,7 +999,6 @@ class ModelNet40WorkerInitDebug: return def __call__(self, worker_id): - # Print workers info worker_info = get_worker_info() print(worker_info) @@ -985,11 +1007,10 @@ class ModelNet40WorkerInitDebug: dataset = worker_info.dataset # the dataset copy in this worker process # In windows, each worker has its own copy of the dataset. In Linux, this is shared in memory - print(dataset.input_labels.__array_interface__['data']) - print(worker_info.dataset.input_labels.__array_interface__['data']) - print(self.dataset.input_labels.__array_interface__['data']) + print(dataset.input_labels.__array_interface__["data"]) + print(worker_info.dataset.input_labels.__array_interface__["data"]) + print(self.dataset.input_labels.__array_interface__["data"]) # configure the dataset to only process the split workload return - diff --git a/datasetss/NPM3D.py b/datasetss/NPM3D.py index 87700f3..4a5be4b 100644 --- a/datasetss/NPM3D.py +++ b/datasetss/NPM3D.py @@ -27,13 +27,11 @@ import time import numpy as np import pickle import torch -import math import warnings from multiprocessing import Lock # OS functions -from os import listdir -from os.path import exists, join, isdir +from os.path import exists, join # Dataset parent class from datasetss.common import PointCloudDataset @@ -53,28 +51,29 @@ from utils.config import bcolors class NPM3DDataset(PointCloudDataset): """Class to handle NPM3D dataset.""" - def __init__(self, config, set='training', use_potentials=True, load_data=True): + def __init__(self, config, set="training", use_potentials=True, load_data=True): """ This dataset is small enough to be stored in-memory, so load all point clouds here """ - PointCloudDataset.__init__(self, 'NPM3D') + PointCloudDataset.__init__(self, "NPM3D") ############ # Parameters ############ # Dict from labels to names - self.label_to_names = {0: 'unclassified', - 1: 'ground', - 2: 'building', - 3: 'pole', # pole - road sign - traffic light - 4: 'bollard', # bollard - small pole - 5: 'trash', # trash can - 6: 'barrier', - 7: 'pedestrian', - 8: 'car', - 9: 'natural' # natural - vegetation - } + self.label_to_names = { + 0: "unclassified", + 1: "ground", + 2: "building", + 3: "pole", # pole - road sign - traffic light + 4: "bollard", # bollard - small pole + 5: "trash", # trash can + 6: "barrier", + 7: "pedestrian", + 8: "car", + 9: "natural", # natural - 
vegetation + } # Initialize a bunch of variables concerning class labels self.init_labels() @@ -83,10 +82,10 @@ class NPM3DDataset(PointCloudDataset): self.ignored_labels = np.array([0]) # Dataset folder - self.path = './Data/Paris' + self.path = "./Data/Paris" # Type of task conducted on this dataset - self.dataset_task = 'cloud_segmentation' + self.dataset_task = "cloud_segmentation" # Update number of class and data task in configuration config.num_classes = self.num_classes - len(self.ignored_labels) @@ -103,14 +102,22 @@ class NPM3DDataset(PointCloudDataset): # Path of the training files # self.train_path = 'original_ply' - self.train_path = 'train' - self.original_ply_path = 'original_ply' + self.train_path = "train" + self.original_ply_path = "original_ply" # List of files to process ply_path = join(self.path, self.train_path) # Proportion of validation scenes - self.cloud_names = ['Lille1_1', 'Lille1_2', 'Lille2', 'Paris', 'ajaccio_2', 'ajaccio_57', 'dijon_9'] + self.cloud_names = [ + "Lille1_1", + "Lille1_2", + "Lille2", + "Paris", + "ajaccio_2", + "ajaccio_57", + "dijon_9", + ] self.all_splits = [0, 1, 2, 3, 4, 5, 6] self.validation_split = 1 # self.test_cloud_names = ['ajaccio_2', 'ajaccio_57', 'dijon_9'] @@ -118,12 +125,12 @@ class NPM3DDataset(PointCloudDataset): self.train_splits = [0, 2, 3] # Number of models used per epoch - if self.set == 'training': + if self.set == "training": self.epoch_n = config.epoch_steps * config.batch_num - elif self.set in ['validation', 'test', 'ERF']: + elif self.set in ["validation", "test", "ERF"]: self.epoch_n = config.validation_size * config.batch_num else: - raise ValueError('Unknown set for NPM3D data: ', self.set) + raise ValueError("Unknown set for NPM3D data: ", self.set) # Stop data is not needed if not load_data: @@ -142,32 +149,41 @@ class NPM3DDataset(PointCloudDataset): # List of training files self.files = [] for i, f in enumerate(self.cloud_names): - if self.set == 'training': + if self.set == "training": if self.all_splits[i] in self.train_splits: - self.files += [join(ply_path, f + '.ply')] - elif self.set in ['validation', 'ERF']: + self.files += [join(ply_path, f + ".ply")] + elif self.set in ["validation", "ERF"]: if self.all_splits[i] == self.validation_split: - self.files += [join(ply_path, f + '.ply')] - elif self.set == 'test': + self.files += [join(ply_path, f + ".ply")] + elif self.set == "test": if self.all_splits[i] in self.test_splits: - self.files += [join(ply_path, f + '.ply')] + self.files += [join(ply_path, f + ".ply")] else: - raise ValueError('Unknown set for NPM3D data: ', self.set) - print('The set is ' + str(self.set)) + raise ValueError("Unknown set for NPM3D data: ", self.set) + print("The set is " + str(self.set)) - if self.set == 'training': - self.cloud_names = [f for i, f in enumerate(self.cloud_names) - if self.all_splits[i] in self.train_splits] - elif self.set in ['validation', 'ERF']: - self.cloud_names = [f for i, f in enumerate(self.cloud_names) - if self.all_splits[i] == self.validation_split] - elif self.set == 'test': - self.cloud_names = [f for i, f in enumerate(self.cloud_names) - if self.all_splits[i] in self.test_splits] - print('The files are ' + str(self.cloud_names)) + if self.set == "training": + self.cloud_names = [ + f + for i, f in enumerate(self.cloud_names) + if self.all_splits[i] in self.train_splits + ] + elif self.set in ["validation", "ERF"]: + self.cloud_names = [ + f + for i, f in enumerate(self.cloud_names) + if self.all_splits[i] == self.validation_split + ] + elif 
self.set == "test": + self.cloud_names = [ + f + for i, f in enumerate(self.cloud_names) + if self.all_splits[i] in self.test_splits + ] + print("The files are " + str(self.cloud_names)) if 0 < self.config.first_subsampling_dl <= 0.01: - raise ValueError('subsampling_parameter too low (should be over 1 cm') + raise ValueError("subsampling_parameter too low (should be over 1 cm") # Initiate containers self.input_trees = [] @@ -195,20 +211,28 @@ class NPM3DDataset(PointCloudDataset): self.min_potentials = [] self.argmin_potentials = [] for i, tree in enumerate(self.pot_trees): - self.potentials += [torch.from_numpy(np.random.rand(tree.data.shape[0]) * 1e-3)] + self.potentials += [ + torch.from_numpy(np.random.rand(tree.data.shape[0]) * 1e-3) + ] min_ind = int(torch.argmin(self.potentials[-1])) self.argmin_potentials += [min_ind] self.min_potentials += [float(self.potentials[-1][min_ind])] # Share potential memory - self.argmin_potentials = torch.from_numpy(np.array(self.argmin_potentials, dtype=np.int64)) - self.min_potentials = torch.from_numpy(np.array(self.min_potentials, dtype=np.float64)) + self.argmin_potentials = torch.from_numpy( + np.array(self.argmin_potentials, dtype=np.int64) + ) + self.min_potentials = torch.from_numpy( + np.array(self.min_potentials, dtype=np.float64) + ) self.argmin_potentials.share_memory_() self.min_potentials.share_memory_() for i, _ in enumerate(self.pot_trees): self.potentials[i].share_memory_() - self.worker_waiting = torch.tensor([0 for _ in range(config.input_threads)], dtype=torch.int32) + self.worker_waiting = torch.tensor( + [0 for _ in range(config.input_threads)], dtype=torch.int32 + ) self.worker_waiting.share_memory_() self.epoch_inds = None self.epoch_i = 0 @@ -217,7 +241,9 @@ class NPM3DDataset(PointCloudDataset): self.potentials = None self.min_potentials = None self.argmin_potentials = None - self.epoch_inds = torch.from_numpy(np.zeros((2, self.epoch_n), dtype=np.int64)) + self.epoch_inds = torch.from_numpy( + np.zeros((2, self.epoch_n), dtype=np.int64) + ) self.epoch_i = torch.from_numpy(np.zeros((1,), dtype=np.int64)) self.epoch_i.share_memory_() self.epoch_inds.share_memory_() @@ -225,7 +251,7 @@ class NPM3DDataset(PointCloudDataset): self.worker_lock = Lock() # For ERF visualization, we want only one cloud per batch and no randomness - if self.set == 'ERF': + if self.set == "ERF": self.batch_limit = torch.tensor([1], dtype=torch.float32) self.batch_limit.share_memory_() np.random.seed(42) @@ -250,7 +276,6 @@ class NPM3DDataset(PointCloudDataset): return self.random_item(batch_i) def potential_item(self, batch_i, debug_workers=False): - t = [time.time()] # Initiate concatanation lists @@ -272,36 +297,34 @@ class NPM3DDataset(PointCloudDataset): wid = None while True: - t += [time.time()] if debug_workers: - message = '' + message = "" for wi in range(info.num_workers): if wi == wid: - message += ' {:}X{:} '.format(bcolors.FAIL, bcolors.ENDC) + message += " {:}X{:} ".format(bcolors.FAIL, bcolors.ENDC) elif self.worker_waiting[wi] == 0: - message += ' ' + message += " " elif self.worker_waiting[wi] == 1: - message += ' | ' + message += " | " elif self.worker_waiting[wi] == 2: - message += ' o ' + message += " o " print(message) self.worker_waiting[wid] = 0 with self.worker_lock: - if debug_workers: - message = '' + message = "" for wi in range(info.num_workers): if wi == wid: - message += ' {:}v{:} '.format(bcolors.OKGREEN, bcolors.ENDC) + message += " {:}v{:} ".format(bcolors.OKGREEN, bcolors.ENDC) elif self.worker_waiting[wi] == 0: - 
message += ' ' + message += " " elif self.worker_waiting[wi] == 1: - message += ' | ' + message += " | " elif self.worker_waiting[wi] == 2: - message += ' o ' + message += " o " print(message) self.worker_waiting[wid] = 1 @@ -316,24 +339,28 @@ class NPM3DDataset(PointCloudDataset): center_point = pot_points[point_ind, :].reshape(1, -1) # Add a small noise to center point - if self.set != 'ERF': - center_point += np.random.normal(scale=self.config.in_radius / 10, size=center_point.shape) + if self.set != "ERF": + center_point += np.random.normal( + scale=self.config.in_radius / 10, size=center_point.shape + ) # Indices of points in input region - pot_inds, dists = self.pot_trees[cloud_ind].query_radius(center_point, - r=self.config.in_radius, - return_distance=True) + pot_inds, dists = self.pot_trees[cloud_ind].query_radius( + center_point, r=self.config.in_radius, return_distance=True + ) d2s = np.square(dists[0]) pot_inds = pot_inds[0] # Update potentials (Tukey weights) - if self.set != 'ERF': + if self.set != "ERF": tukeys = np.square(1 - d2s / np.square(self.config.in_radius)) tukeys[d2s > np.square(self.config.in_radius)] = 0 self.potentials[cloud_ind][pot_inds] += tukeys min_ind = torch.argmin(self.potentials[cloud_ind]) - self.min_potentials[[cloud_ind]] = self.potentials[cloud_ind][min_ind] + self.min_potentials[[cloud_ind]] = self.potentials[cloud_ind][ + min_ind + ] self.argmin_potentials[[cloud_ind]] = min_ind t += [time.time()] @@ -342,8 +369,9 @@ class NPM3DDataset(PointCloudDataset): points = np.array(self.input_trees[cloud_ind].data, copy=False) # Indices of points in input region - input_inds = self.input_trees[cloud_ind].query_radius(center_point, - r=self.config.in_radius)[0] + input_inds = self.input_trees[cloud_ind].query_radius( + center_point, r=self.config.in_radius + )[0] t += [time.time()] @@ -354,7 +382,9 @@ class NPM3DDataset(PointCloudDataset): if n < 2: failed_attempts += 1 if failed_attempts > 100 * self.config.batch_num: - raise ValueError('It seems this dataset only containes empty input spheres') + raise ValueError( + "It seems this dataset only containes empty input spheres" + ) t += [time.time()] t += [time.time()] continue @@ -362,7 +392,7 @@ class NPM3DDataset(PointCloudDataset): # Collect labels and colors input_points = (points[input_inds] - center_point).astype(np.float32) # input_colors = self.input_colors[cloud_ind][input_inds] - if self.set in ['test', 'ERF']: + if self.set in ["test", "ERF"]: input_labels = np.zeros(input_points.shape[0]) else: input_labels = self.input_labels[cloud_ind][input_inds] @@ -379,7 +409,9 @@ class NPM3DDataset(PointCloudDataset): # Get original height as additional feature # input_features = np.hstack((input_colors, input_points[:, 2:] + center_point[:, 2:])).astype(np.float32) - input_features = np.hstack((input_points[:, 2:] + center_point[:, 2:])).astype(np.float32) + input_features = np.hstack( + (input_points[:, 2:] + center_point[:, 2:]) + ).astype(np.float32) t += [time.time()] @@ -428,7 +460,9 @@ class NPM3DDataset(PointCloudDataset): elif self.config.in_features_dim == 5: stacked_features = np.hstack((stacked_features, features)) else: - raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)') + raise ValueError( + "Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)" + ) ####################### # Create network inputs @@ -440,10 +474,9 @@ class NPM3DDataset(PointCloudDataset): t += [time.time()] # Get the whole input list - input_list = 
self.segmentation_inputs(stacked_points, - stacked_features, - labels, - stack_lengths) + input_list = self.segmentation_inputs( + stacked_points, stacked_features, labels, stack_lengths + ) t += [time.time()] @@ -451,16 +484,16 @@ class NPM3DDataset(PointCloudDataset): input_list += [scales, rots, cloud_inds, point_inds, input_inds] if debug_workers: - message = '' + message = "" for wi in range(info.num_workers): if wi == wid: - message += ' {:}0{:} '.format(bcolors.OKBLUE, bcolors.ENDC) + message += " {:}0{:} ".format(bcolors.OKBLUE, bcolors.ENDC) elif self.worker_waiting[wi] == 0: - message += ' ' + message += " " elif self.worker_waiting[wi] == 1: - message += ' | ' + message += " | " elif self.worker_waiting[wi] == 2: - message += ' o ' + message += " o " print(message) self.worker_waiting[wid] = 2 @@ -469,51 +502,65 @@ class NPM3DDataset(PointCloudDataset): # Display timings debugT = False if debugT: - print('\n************************\n') - print('Timings:') + print("\n************************\n") + print("Timings:") ti = 0 N = 5 - mess = 'Init ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Init ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Pots ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Pots ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Sphere .... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Sphere .... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Collect ... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Collect ... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Augment ... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Augment ... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += N * (len(stack_lengths) - 1) + 1 - print('concat .... {:5.1f}ms'.format(1000 * (t[ti + 1] - t[ti]))) + print("concat .... {:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('input ..... {:5.1f}ms'.format(1000 * (t[ti + 1] - t[ti]))) + print("input ..... {:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('stack ..... {:5.1f}ms'.format(1000 * (t[ti + 1] - t[ti]))) + print("stack ..... 
{:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('\n************************\n') + print("\n************************\n") return input_list def random_item(self, batch_i): - # Initiate concatanation lists p_list = [] f_list = [] @@ -527,9 +574,7 @@ class NPM3DDataset(PointCloudDataset): failed_attempts = 0 while True: - with self.worker_lock: - # Get potential minimum cloud_ind = int(self.epoch_inds[0, self.epoch_i]) point_ind = int(self.epoch_inds[1, self.epoch_i]) @@ -546,12 +591,15 @@ class NPM3DDataset(PointCloudDataset): center_point = points[point_ind, :].reshape(1, -1) # Add a small noise to center point - if self.set != 'ERF': - center_point += np.random.normal(scale=self.config.in_radius / 10, size=center_point.shape) + if self.set != "ERF": + center_point += np.random.normal( + scale=self.config.in_radius / 10, size=center_point.shape + ) # Indices of points in input region - input_inds = self.input_trees[cloud_ind].query_radius(center_point, - r=self.config.in_radius)[0] + input_inds = self.input_trees[cloud_ind].query_radius( + center_point, r=self.config.in_radius + )[0] # Number collected n = input_inds.shape[0] @@ -560,13 +608,15 @@ class NPM3DDataset(PointCloudDataset): if n < 2: failed_attempts += 1 if failed_attempts > 100 * self.config.batch_num: - raise ValueError('It seems this dataset only containes empty input spheres') + raise ValueError( + "It seems this dataset only containes empty input spheres" + ) continue # Collect labels and colors input_points = (points[input_inds] - center_point).astype(np.float32) # input_colors = self.input_colors[cloud_ind][input_inds] - if self.set in ['test', 'ERF']: + if self.set in ["test", "ERF"]: input_labels = np.zeros(input_points.shape[0]) else: input_labels = self.input_labels[cloud_ind][input_inds] @@ -580,7 +630,9 @@ class NPM3DDataset(PointCloudDataset): # input_colors *= 0 # Get original height as additional feature - input_features = np.hstack((input_points[:, 2:] + center_point[:, 2:])).astype(np.float32) + input_features = np.hstack( + (input_points[:, 2:] + center_point[:, 2:]) + ).astype(np.float32) # Stack batch p_list += [input_points] @@ -627,7 +679,9 @@ class NPM3DDataset(PointCloudDataset): elif self.config.in_features_dim == 5: stacked_features = np.hstack((stacked_features, features)) else: - raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)') + raise ValueError( + "Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)" + ) ####################### # Create network inputs @@ -637,10 +691,9 @@ class NPM3DDataset(PointCloudDataset): # # Get the whole input list - input_list = self.segmentation_inputs(stacked_points, - stacked_features, - labels, - stack_lengths) + input_list = self.segmentation_inputs( + stacked_points, stacked_features, labels, stack_lengths + ) # Add scale and rotation for testing input_list += [scales, rots, cloud_inds, point_inds, input_inds] @@ -648,8 +701,7 @@ class NPM3DDataset(PointCloudDataset): return input_list def prepare_NPM3D_ply(self): - - print('\nPreparing ply files') + print("\nPreparing ply files") t0 = time.time() # Folder for the ply files @@ -658,18 +710,19 @@ class NPM3DDataset(PointCloudDataset): makedirs(ply_path) for cloud_name in self.cloud_names: - # Pass if the cloud has already been computed - cloud_file = join(ply_path, cloud_name + '.ply') + cloud_file = join(ply_path, cloud_name + ".ply") if exists(cloud_file): continue - original_ply = read_ply(join(self.path, self.original_ply_path, cloud_name + 
'.ply')) + original_ply = read_ply( + join(self.path, self.original_ply_path, cloud_name + ".ply") + ) # Initiate containers - cloud_x = original_ply['x'] - cloud_y = original_ply['y'] - cloud_z = original_ply['z'] + cloud_x = original_ply["x"] + cloud_y = original_ply["y"] + cloud_z = original_ply["z"] cloud_x = cloud_x - (cloud_x.min()) cloud_y = cloud_y - (cloud_y.min()) cloud_z = cloud_z - (cloud_z.min()) @@ -688,30 +741,34 @@ class NPM3DDataset(PointCloudDataset): cloud_points = np.hstack((cloud_x, cloud_y, cloud_z)) # Labels - if cloud_name in ['ajaccio_2', 'ajaccio_57', 'dijon_9']: - - field_names = ['x', 'y', 'z'] - write_ply(join(ply_path, cloud_name + '.ply'), cloud_points, field_names) + if cloud_name in ["ajaccio_2", "ajaccio_57", "dijon_9"]: + field_names = ["x", "y", "z"] + write_ply( + join(ply_path, cloud_name + ".ply"), cloud_points, field_names + ) else: - labels = original_ply['class'] + labels = original_ply["class"] labels = labels.astype(np.int32) labels = labels.reshape(len(labels), 1) # Save as ply - field_names = ['x', 'y', 'z', 'class'] - write_ply(join(ply_path, cloud_name + '.ply'), [cloud_points, labels], field_names) + field_names = ["x", "y", "z", "class"] + write_ply( + join(ply_path, cloud_name + ".ply"), + [cloud_points, labels], + field_names, + ) - print('Done in {:.1f}s'.format(time.time() - t0)) + print("Done in {:.1f}s".format(time.time() - t0)) return def load_subsampled_clouds(self): - # Parameter dl = self.config.first_subsampling_dl # Create path for files - tree_path = join(self.path, 'input_{:.3f}'.format(dl)) + tree_path = join(self.path, "input_{:.3f}".format(dl)) if not exists(tree_path): makedirs(tree_path) @@ -720,7 +777,6 @@ class NPM3DDataset(PointCloudDataset): ############## for i, file_path in enumerate(self.files): - # Restart timer t0 = time.time() @@ -728,40 +784,48 @@ class NPM3DDataset(PointCloudDataset): cloud_name = self.cloud_names[i] # Name of the input files - KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name)) - sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name)) + KDTree_file = join(tree_path, "{:s}_KDTree.pkl".format(cloud_name)) + sub_ply_file = join(tree_path, "{:s}.ply".format(cloud_name)) # Check if inputs have already been computed if exists(KDTree_file): - print('\nFound KDTree for cloud {:s}, subsampled at {:.3f}'.format(cloud_name, dl)) + print( + "\nFound KDTree for cloud {:s}, subsampled at {:.3f}".format( + cloud_name, dl + ) + ) # read ply with data data = read_ply(sub_ply_file) # sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T - sub_labels = data['class'] + sub_labels = data["class"] # Read pkl with search tree - with open(KDTree_file, 'rb') as f: + with open(KDTree_file, "rb") as f: search_tree = pickle.load(f) else: - print('\nPreparing KDTree for cloud {:s}, subsampled at {:.3f}'.format(cloud_name, dl)) + print( + "\nPreparing KDTree for cloud {:s}, subsampled at {:.3f}".format( + cloud_name, dl + ) + ) # Read ply file data = read_ply(file_path) - points = np.vstack((data['x'], data['y'], data['z'])).T + points = np.vstack((data["x"], data["y"], data["z"])).T # colors = np.vstack((data['red'], data['green'], data['blue'])).T # Fake labels for test data - if self.set == 'test': + if self.set == "test": labels = np.zeros((data.shape[0],), dtype=np.int32) else: - labels = data['class'] + labels = data["class"] # Subsample cloud - sub_points, sub_labels = grid_subsampling(points, - labels=labels, - sampleDl=dl) + sub_points, sub_labels = grid_subsampling( + points, 
labels=labels, sampleDl=dl + ) # Rescale float color and squeeze label # sub_colors = sub_colors / 255 @@ -773,13 +837,13 @@ class NPM3DDataset(PointCloudDataset): # search_tree.fit(sub_points) # Save KDTree - with open(KDTree_file, 'wb') as f: + with open(KDTree_file, "wb") as f: pickle.dump(search_tree, f) # Save ply - write_ply(sub_ply_file, - [sub_points, sub_labels], - ['x', 'y', 'z', 'class']) + write_ply( + sub_ply_file, [sub_points, sub_labels], ["x", "y", "z", "class"] + ) # Fill data containers self.input_trees += [search_tree] @@ -787,7 +851,7 @@ class NPM3DDataset(PointCloudDataset): self.input_labels += [sub_labels] size = sub_labels.shape[0] * 4 * 7 - print('{:.1f} MB loaded in {:.1f}s'.format(size * 1e-6, time.time() - t0)) + print("{:.1f} MB loaded in {:.1f}s".format(size * 1e-6, time.time() - t0)) ############################ # Coarse potential locations @@ -795,7 +859,7 @@ class NPM3DDataset(PointCloudDataset): # Only necessary for validation and test sets if self.use_potentials: - print('\nPreparing potentials') + print("\nPreparing potentials") # Restart timer t0 = time.time() @@ -804,36 +868,39 @@ class NPM3DDataset(PointCloudDataset): cloud_ind = 0 for i, file_path in enumerate(self.files): - # Get cloud name cloud_name = self.cloud_names[i] # Name of the input files - coarse_KDTree_file = join(tree_path, '{:s}_coarse_KDTree.pkl'.format(cloud_name)) + coarse_KDTree_file = join( + tree_path, "{:s}_coarse_KDTree.pkl".format(cloud_name) + ) # Check if inputs have already been computed if exists(coarse_KDTree_file): # Read pkl with search tree - with open(coarse_KDTree_file, 'rb') as f: + with open(coarse_KDTree_file, "rb") as f: search_tree = pickle.load(f) else: # Subsample cloud sub_points = np.array(self.input_trees[cloud_ind].data, copy=False) - coarse_points = grid_subsampling(sub_points.astype(np.float32), sampleDl=pot_dl) + coarse_points = grid_subsampling( + sub_points.astype(np.float32), sampleDl=pot_dl + ) # Get chosen neighborhoods search_tree = KDTree(coarse_points, leaf_size=10) # Save KDTree - with open(coarse_KDTree_file, 'wb') as f: + with open(coarse_KDTree_file, "wb") as f: pickle.dump(search_tree, f) # Fill data containers self.pot_trees += [search_tree] cloud_ind += 1 - print('Done in {:.1f}s'.format(time.time() - t0)) + print("Done in {:.1f}s".format(time.time() - t0)) ###################### # Reprojection indices @@ -843,13 +910,11 @@ class NPM3DDataset(PointCloudDataset): self.num_clouds = len(self.input_trees) # Only necessary for validation and test sets - if self.set in ['validation', 'test']: - - print('\nPreparing reprojection indices for testing') + if self.set in ["validation", "test"]: + print("\nPreparing reprojection indices for testing") # Get validation/test reprojection indices for i, file_path in enumerate(self.files): - # Restart timer t0 = time.time() @@ -857,21 +922,21 @@ class NPM3DDataset(PointCloudDataset): cloud_name = self.cloud_names[i] # File name for saving - proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name)) + proj_file = join(tree_path, "{:s}_proj.pkl".format(cloud_name)) # Try to load previous indices if exists(proj_file): - with open(proj_file, 'rb') as f: + with open(proj_file, "rb") as f: proj_inds, labels = pickle.load(f) else: data = read_ply(file_path) - points = np.vstack((data['x'], data['y'], data['z'])).T + points = np.vstack((data["x"], data["y"], data["z"])).T # Fake labels - if self.set == 'test': + if self.set == "test": labels = np.zeros((data.shape[0],), dtype=np.int32) else: - labels = 
data['class'] + labels = data["class"] # Compute projection inds idxs = self.input_trees[i].query(points, return_distance=False) @@ -879,12 +944,12 @@ class NPM3DDataset(PointCloudDataset): proj_inds = np.squeeze(idxs).astype(np.int32) # Save - with open(proj_file, 'wb') as f: + with open(proj_file, "wb") as f: pickle.dump([proj_inds, labels], f) self.test_proj += [proj_inds] self.validation_labels += [labels] - print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0)) + print("{:s} done in {:.1f}s".format(cloud_name, time.time() - t0)) print() return @@ -896,7 +961,7 @@ class NPM3DDataset(PointCloudDataset): # Get original points data = read_ply(file_path) - return np.vstack((data['x'], data['y'], data['z'])).T + return np.vstack((data["x"], data["y"], data["z"])).T # ---------------------------------------------------------------------------------------------------------------------- @@ -915,7 +980,7 @@ class NPM3DSampler(Sampler): self.dataset = dataset # Number of step per epoch - if dataset.set == 'training': + if dataset.set == "training": self.N = dataset.config.epoch_steps else: self.N = dataset.config.validation_size @@ -929,7 +994,6 @@ class NPM3DSampler(Sampler): """ if not self.dataset.use_potentials: - # Initiate current epoch ind self.dataset.epoch_i *= 0 self.dataset.epoch_inds *= 0 @@ -942,16 +1006,23 @@ class NPM3DSampler(Sampler): random_pick_n = int(np.ceil(num_centers / self.dataset.config.num_classes)) # Choose random points of each class for each cloud - epoch_indices = np.zeros((2, 0), dtype=np.int64) + np.zeros((2, 0), dtype=np.int64) for label_ind, label in enumerate(self.dataset.label_values): if label not in self.dataset.ignored_labels: - # Gather indices of the points with this label in all the input clouds all_label_indices = [] for cloud_ind, cloud_labels in enumerate(self.dataset.input_labels): label_indices = np.where(np.equal(cloud_labels, label))[0] all_label_indices.append( - np.vstack((np.full(label_indices.shape, cloud_ind, dtype=np.int64), label_indices))) + np.vstack( + ( + np.full( + label_indices.shape, cloud_ind, dtype=np.int64 + ), + label_indices, + ) + ) + ) # Stack them: [2, N1+N2+...] all_label_indices = np.hstack(all_label_indices) @@ -962,24 +1033,39 @@ class NPM3DSampler(Sampler): chosen_label_inds = np.zeros((2, 0), dtype=np.int64) while chosen_label_inds.shape[1] < random_pick_n: chosen_label_inds = np.hstack( - (chosen_label_inds, all_label_indices[:, np.random.permutation(N_inds)])) - warnings.warn('When choosing random epoch indices (use_potentials=False), \ + ( + chosen_label_inds, + all_label_indices[:, np.random.permutation(N_inds)], + ) + ) + warnings.warn( + "When choosing random epoch indices (use_potentials=False), \ class {:d}: {:s} only had {:d} available points, while we \ - needed {:d}. Repeating indices in the same epoch'.format(label, - self.dataset.label_names[ - label_ind], - N_inds, - random_pick_n)) + needed {:d}. 
Repeating indices in the same epoch".format( + label, + self.dataset.label_names[label_ind], + N_inds, + random_pick_n, + ) + ) elif N_inds < 50 * random_pick_n: - rand_inds = np.random.choice(N_inds, size=random_pick_n, replace=False) + rand_inds = np.random.choice( + N_inds, size=random_pick_n, replace=False + ) chosen_label_inds = all_label_indices[:, rand_inds] else: chosen_label_inds = np.zeros((2, 0), dtype=np.int64) while chosen_label_inds.shape[1] < random_pick_n: - rand_inds = np.unique(np.random.choice(N_inds, size=2 * random_pick_n, replace=True)) - chosen_label_inds = np.hstack((chosen_label_inds, all_label_indices[:, rand_inds])) + rand_inds = np.unique( + np.random.choice( + N_inds, size=2 * random_pick_n, replace=True + ) + ) + chosen_label_inds = np.hstack( + (chosen_label_inds, all_label_indices[:, rand_inds]) + ) chosen_label_inds = chosen_label_inds[:, :random_pick_n] # Stack for each label @@ -1030,7 +1116,6 @@ class NPM3DSampler(Sampler): for epoch in range(10): for i, test in enumerate(self): - # New time t = t[-1:] t += [time.time()] @@ -1069,17 +1154,23 @@ class NPM3DSampler(Sampler): # Console display (only one per second) if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}, // {:.1f}ms {:.1f}ms' - print(message.format(i, - estim_b, - int(self.dataset.batch_limit), - 1000 * mean_dt[0], - 1000 * mean_dt[1])) + message = "Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}, // {:.1f}ms {:.1f}ms" + print( + message.format( + i, + estim_b, + int(self.dataset.batch_limit), + 1000 * mean_dt[0], + 1000 * mean_dt[1], + ) + ) if breaking: break - def calibration(self, dataloader, untouched_ratio=0.9, verbose=False, force_redo=False): + def calibration( + self, dataloader, untouched_ratio=0.9, verbose=False, force_redo=False + ): """ Method performing batch and neighbors calibration. 
Batch calibration: Set "batch_limit" (the maximum number of points allowed in every batch) so that the @@ -1092,7 +1183,7 @@ class NPM3DSampler(Sampler): # Previously saved calibration ############################## - print('\nStarting Calibration (use verbose=True for more details)') + print("\nStarting Calibration (use verbose=True for more details)") t0 = time.time() redo = force_redo @@ -1101,45 +1192,47 @@ class NPM3DSampler(Sampler): # *********** # Load batch_limit dictionary - batch_lim_file = join(self.dataset.path, 'batch_limits.pkl') + batch_lim_file = join(self.dataset.path, "batch_limits.pkl") if exists(batch_lim_file): - with open(batch_lim_file, 'rb') as file: + with open(batch_lim_file, "rb") as file: batch_lim_dict = pickle.load(file) else: batch_lim_dict = {} # Check if the batch limit associated with current parameters exists if self.dataset.use_potentials: - sampler_method = 'potentials' + sampler_method = "potentials" else: - sampler_method = 'random' - key = '{:s}_{:.3f}_{:.3f}_{:d}'.format(sampler_method, - self.dataset.config.in_radius, - self.dataset.config.first_subsampling_dl, - self.dataset.config.batch_num) + sampler_method = "random" + key = "{:s}_{:.3f}_{:.3f}_{:d}".format( + sampler_method, + self.dataset.config.in_radius, + self.dataset.config.first_subsampling_dl, + self.dataset.config.batch_num, + ) if not redo and key in batch_lim_dict: self.dataset.batch_limit[0] = batch_lim_dict[key] else: redo = True if verbose: - print('\nPrevious calibration found:') - print('Check batch limit dictionary') + print("\nPrevious calibration found:") + print("Check batch limit dictionary") if key in batch_lim_dict: color = bcolors.OKGREEN v = str(int(batch_lim_dict[key])) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" 
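# --- Illustration, not part of the patch: a minimal, self-contained sketch of
# how the cached batch_limit lookup above behaves. The file name and the numeric
# parameters here are hypothetical stand-ins for the values the real code reads
# from self.dataset.config.
import os
import pickle

def load_or_init_limits(path):
    """Return the pickled calibration dictionary, or an empty one if absent."""
    if os.path.exists(path):
        with open(path, "rb") as f:
            return pickle.load(f)
    return {}

batch_lim_dict = load_or_init_limits("batch_limits.pkl")
key = "{:s}_{:.3f}_{:.3f}_{:d}".format("potentials", 4.0, 0.06, 6)
if key in batch_lim_dict:
    batch_limit = batch_lim_dict[key]  # reuse the previous calibration
else:
    batch_limit = None                 # no cached value: calibration must be redone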
+ print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) # Neighbors limit # *************** # Load neighb_limits dictionary - neighb_lim_file = join(self.dataset.path, 'neighbors_limits.pkl') + neighb_lim_file = join(self.dataset.path, "neighbors_limits.pkl") if exists(neighb_lim_file): - with open(neighb_lim_file, 'rb') as file: + with open(neighb_lim_file, "rb") as file: neighb_lim_dict = pickle.load(file) else: neighb_lim_dict = {} @@ -1147,14 +1240,13 @@ class NPM3DSampler(Sampler): # Check if the limit associated with current parameters exists (for each layer) neighb_limits = [] for layer_ind in range(self.dataset.config.num_layers): - - dl = self.dataset.config.first_subsampling_dl * (2 ** layer_ind) + dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) if key in neighb_lim_dict: neighb_limits += [neighb_lim_dict[key]] @@ -1164,34 +1256,37 @@ class NPM3DSampler(Sampler): redo = True if verbose: - print('Check neighbors limit dictionary') + print("Check neighbors limit dictionary") for layer_ind in range(self.dataset.config.num_layers): - dl = self.dataset.config.first_subsampling_dl * (2 ** layer_ind) + dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) if key in neighb_lim_dict: color = bcolors.OKGREEN v = str(neighb_lim_dict[key]) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" 
+ print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) if redo: - ############################ # Neighbors calib parameters ############################ # From config parameter, compute higher bound of neighbors number in a neighborhood - hist_n = int(np.ceil(4 / 3 * np.pi * (self.dataset.config.deform_radius + 1) ** 3)) + hist_n = int( + np.ceil(4 / 3 * np.pi * (self.dataset.config.deform_radius + 1) ** 3) + ) # Histogram of neighborhood sizes - neighb_hists = np.zeros((self.dataset.config.num_layers, hist_n), dtype=np.int32) + neighb_hists = np.zeros( + (self.dataset.config.num_layers, hist_n), dtype=np.int32 + ) ######################## # Batch calib parameters @@ -1238,10 +1333,11 @@ class NPM3DSampler(Sampler): sample_batches = 999 for epoch in range((sample_batches // self.N) + 1): for batch_i, batch in enumerate(dataloader): - # Update neighborhood histogram - counts = [np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) for neighb_mat in - batch.neighbors] + counts = [ + np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) + for neighb_mat in batch.neighbors + ] hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts] neighb_hists += np.vstack(hists) @@ -1288,10 +1384,8 @@ class NPM3DSampler(Sampler): # Console display (only one per second) if verbose and (t - last_display) > 1.0: last_display = t - message = 'Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}' - print(message.format(i, - estim_b, - int(self.dataset.batch_limit))) + message = "Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}" + print(message.format(i, estim_b, int(self.dataset.batch_limit))) # Debug plots debug_in.append(int(batch.points[0].shape[0])) @@ -1307,7 +1401,8 @@ class NPM3DSampler(Sampler): import matplotlib.pyplot as plt print( - "ERROR: It seems that the calibration have not reached convergence. Here are some plot to understand why:") + "ERROR: It seems that the calibration have not reached convergence. 
Here are some plot to understand why:" + ) print("If you notice unstability, reduce the expected_N value") print("If convergece is too slow, increase the expected_N value") @@ -1321,68 +1416,69 @@ class NPM3DSampler(Sampler): plt.show() - a = 1 / 0 - # Use collected neighbor histogram to get neighbors limit cumsum = np.cumsum(neighb_hists.T, axis=0) - percentiles = np.sum(cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0) + percentiles = np.sum( + cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0 + ) self.dataset.neighborhood_limits = percentiles if verbose: - # Crop histogram while np.sum(neighb_hists[:, -1]) == 0: neighb_hists = neighb_hists[:, :-1] hist_n = neighb_hists.shape[1] - print('\n**************************************************\n') - line0 = 'neighbors_num ' + print("\n**************************************************\n") + line0 = "neighbors_num " for layer in range(neighb_hists.shape[0]): - line0 += '| layer {:2d} '.format(layer) + line0 += "| layer {:2d} ".format(layer) print(line0) for neighb_size in range(hist_n): - line0 = ' {:4d} '.format(neighb_size) + line0 = " {:4d} ".format(neighb_size) for layer in range(neighb_hists.shape[0]): if neighb_size > percentiles[layer]: color = bcolors.FAIL else: color = bcolors.OKGREEN - line0 += '|{:}{:10d}{:} '.format(color, - neighb_hists[layer, neighb_size], - bcolors.ENDC) + line0 += "|{:}{:10d}{:} ".format( + color, neighb_hists[layer, neighb_size], bcolors.ENDC + ) print(line0) - print('\n**************************************************\n') - print('\nchosen neighbors limits: ', percentiles) + print("\n**************************************************\n") + print("\nchosen neighbors limits: ", percentiles) print() # Save batch_limit dictionary if self.dataset.use_potentials: - sampler_method = 'potentials' + sampler_method = "potentials" else: - sampler_method = 'random' - key = '{:s}_{:.3f}_{:.3f}_{:d}'.format(sampler_method, - self.dataset.config.in_radius, - self.dataset.config.first_subsampling_dl, - self.dataset.config.batch_num) + sampler_method = "random" + key = "{:s}_{:.3f}_{:.3f}_{:d}".format( + sampler_method, + self.dataset.config.in_radius, + self.dataset.config.first_subsampling_dl, + self.dataset.config.batch_num, + ) batch_lim_dict[key] = float(self.dataset.batch_limit) - with open(batch_lim_file, 'wb') as file: + with open(batch_lim_file, "wb") as file: pickle.dump(batch_lim_dict, file) # Save neighb_limit dictionary for layer_ind in range(self.dataset.config.num_layers): - dl = self.dataset.config.first_subsampling_dl * (2 ** layer_ind) + dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) neighb_lim_dict[key] = self.dataset.neighborhood_limits[layer_ind] - with open(neighb_lim_file, 'wb') as file: + with open(neighb_lim_file, "wb") as file: pickle.dump(neighb_lim_dict, file) - print('Calibration done in {:.1f}s\n'.format(time.time() - t0)) + print("Calibration done in {:.1f}s\n".format(time.time() - t0)) return @@ -1390,7 +1486,6 @@ class NPM3DCustomBatch: """Custom batch definition with memory pinning for NPM3D""" def __init__(self, input_list): - # Get rid of batch dimension input_list = input_list[0] @@ -1399,15 +1494,25 @@ class NPM3DCustomBatch: # Extract input tensors from the list of numpy array ind = 0 - self.points = [torch.from_numpy(nparray) for nparray 
in input_list[ind:ind + L]] + self.points = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.neighbors = [torch.from_numpy(nparray) for nparray in input_list[ind:ind + L]] + self.neighbors = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.pools = [torch.from_numpy(nparray) for nparray in input_list[ind:ind + L]] + self.pools = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.upsamples = [torch.from_numpy(nparray) for nparray in input_list[ind:ind + L]] + self.upsamples = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.lengths = [torch.from_numpy(nparray) for nparray in input_list[ind:ind + L]] + self.lengths = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L self.features = torch.from_numpy(input_list[ind]) ind += 1 @@ -1446,7 +1551,6 @@ class NPM3DCustomBatch: return self def to(self, device): - self.points = [in_tensor.to(device) for in_tensor in self.points] self.neighbors = [in_tensor.to(device) for in_tensor in self.neighbors] self.pools = [in_tensor.to(device) for in_tensor in self.pools] @@ -1464,15 +1568,15 @@ class NPM3DCustomBatch: def unstack_points(self, layer=None): """Unstack the points""" - return self.unstack_elements('points', layer) + return self.unstack_elements("points", layer) def unstack_neighbors(self, layer=None): """Unstack the neighbors indices""" - return self.unstack_elements('neighbors', layer) + return self.unstack_elements("neighbors", layer) def unstack_pools(self, layer=None): """Unstack the pooling indices""" - return self.unstack_elements('pools', layer) + return self.unstack_elements("pools", layer) def unstack_elements(self, element_name, layer=None, to_numpy=True): """ @@ -1480,34 +1584,31 @@ class NPM3DCustomBatch: layers """ - if element_name == 'points': + if element_name == "points": elements = self.points - elif element_name == 'neighbors': + elif element_name == "neighbors": elements = self.neighbors - elif element_name == 'pools': + elif element_name == "pools": elements = self.pools[:-1] else: - raise ValueError('Unknown element name: {:s}'.format(element_name)) + raise ValueError("Unknown element name: {:s}".format(element_name)) all_p_list = [] for layer_i, layer_elems in enumerate(elements): - if layer is None or layer == layer_i: - i0 = 0 p_list = [] - if element_name == 'pools': + if element_name == "pools": lengths = self.lengths[layer_i + 1] else: lengths = self.lengths[layer_i] for b_i, length in enumerate(lengths): - - elem = layer_elems[i0:i0 + length] - if element_name == 'neighbors': + elem = layer_elems[i0 : i0 + length] + if element_name == "neighbors": elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= 0] -= i0 - elif element_name == 'pools': + elif element_name == "pools": elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= 0] -= torch.sum(self.lengths[layer_i][:b_i]) i0 += length @@ -1539,13 +1640,12 @@ def debug_upsampling(dataset, loader): """Shows which labels are sampled according to strategy chosen""" for epoch in range(10): - for batch_i, batch in enumerate(loader): pc1 = batch.points[1].numpy() pc2 = batch.points[2].numpy() up1 = batch.upsamples[1].numpy() - print(pc1.shape, '=>', pc2.shape) + print(pc1.shape, "=>", pc2.shape) print(up1.shape, np.max(up1)) pc2 = np.vstack((pc2, np.zeros_like(pc2[:1, :]))) @@ -1554,14 +1654,14 @@ def debug_upsampling(dataset, loader): p0 = pc1[10, :] neighbs0 = 
up1[10, :] neighbs0 = pc2[neighbs0, :] - p0 - d2 = np.sum(neighbs0 ** 2, axis=1) + d2 = np.sum(neighbs0**2, axis=1) print(neighbs0.shape) print(neighbs0[:5]) print(d2[:5]) - print('******************') - print('*******************************************') + print("******************") + print("*******************************************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -1577,7 +1677,6 @@ def debug_timing(dataset, loader): estim_N = 0 for epoch in range(10): - for batch_i, batch in enumerate(loader): # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) @@ -1599,14 +1698,14 @@ def debug_timing(dataset, loader): # Console display (only one per second) if (t[-1] - last_display) > -1.0: last_display = t[-1] - message = 'Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f} - {:.0f}' - print(message.format(batch_i, - 1000 * mean_dt[0], - 1000 * mean_dt[1], - estim_b, - estim_N)) + message = "Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f} - {:.0f}" + print( + message.format( + batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1], estim_b, estim_N + ) + ) - print('************* Epoch ended *************') + print("************* Epoch ended *************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -1614,40 +1713,34 @@ def debug_timing(dataset, loader): def debug_show_clouds(dataset, loader): for epoch in range(10): - - clouds = [] - cloud_normals = [] - cloud_labels = [] - L = dataset.config.num_layers for batch_i, batch in enumerate(loader): - # Print characteristics of input tensors - print('\nPoints tensors') + print("\nPoints tensors") for i in range(L): print(batch.points[i].dtype, batch.points[i].shape) - print('\nNeigbors tensors') + print("\nNeigbors tensors") for i in range(L): print(batch.neighbors[i].dtype, batch.neighbors[i].shape) - print('\nPools tensors') + print("\nPools tensors") for i in range(L): print(batch.pools[i].dtype, batch.pools[i].shape) - print('\nStack lengths') + print("\nStack lengths") for i in range(L): print(batch.lengths[i].dtype, batch.lengths[i].shape) - print('\nFeatures') + print("\nFeatures") print(batch.features.dtype, batch.features.shape) - print('\nLabels') + print("\nLabels") print(batch.labels.dtype, batch.labels.shape) - print('\nAugment Scales') + print("\nAugment Scales") print(batch.scales.dtype, batch.scales.shape) - print('\nAugment Rotations') + print("\nAugment Rotations") print(batch.rots.dtype, batch.rots.shape) - print('\nModel indices') + print("\nModel indices") print(batch.model_inds.dtype, batch.model_inds.shape) - print('\nAre input tensors pinned') + print("\nAre input tensors pinned") print(batch.neighbors[0].is_pinned()) print(batch.neighbors[-1].is_pinned()) print(batch.points[0].is_pinned()) @@ -1659,7 +1752,7 @@ def debug_show_clouds(dataset, loader): show_input_batch(batch) - print('*******************************************') + print("*******************************************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -1673,7 +1766,6 @@ def debug_batch_and_neighbors_calib(dataset, loader): mean_dt = np.zeros(2) for epoch in range(10): - for batch_i, input_list in enumerate(loader): # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) @@ -1691,12 +1783,10 @@ def debug_batch_and_neighbors_calib(dataset, loader): # Console display (only one per second) if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'Step 
{:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} ' - print(message.format(batch_i, - 1000 * mean_dt[0], - 1000 * mean_dt[1])) + message = "Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} " + print(message.format(batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1])) - print('************* Epoch ended *************') + print("************* Epoch ended *************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) diff --git a/datasetss/S3DIS.py b/datasetss/S3DIS.py index 6e1b43f..83371ed 100644 --- a/datasetss/S3DIS.py +++ b/datasetss/S3DIS.py @@ -27,7 +27,6 @@ import time import numpy as np import pickle import torch -import math import warnings from multiprocessing import Lock @@ -54,30 +53,32 @@ from utils.config import bcolors class S3DISDataset(PointCloudDataset): """Class to handle S3DIS dataset.""" - def __init__(self, config, set='training', use_potentials=True, load_data=True): + def __init__(self, config, set="training", use_potentials=True, load_data=True): """ This dataset is small enough to be stored in-memory, so load all point clouds here """ - PointCloudDataset.__init__(self, 'S3DIS') + PointCloudDataset.__init__(self, "S3DIS") ############ # Parameters ############ # Dict from labels to names - self.label_to_names = {0: 'ceiling', - 1: 'floor', - 2: 'wall', - 3: 'beam', - 4: 'column', - 5: 'window', - 6: 'door', - 7: 'chair', - 8: 'table', - 9: 'bookcase', - 10: 'sofa', - 11: 'board', - 12: 'clutter'} + self.label_to_names = { + 0: "ceiling", + 1: "floor", + 2: "wall", + 3: "beam", + 4: "column", + 5: "window", + 6: "door", + 7: "chair", + 8: "table", + 9: "bookcase", + 10: "sofa", + 11: "board", + 12: "clutter", + } # Initialize a bunch of variables concerning class labels self.init_labels() @@ -86,10 +87,10 @@ class S3DISDataset(PointCloudDataset): self.ignored_labels = np.array([]) # Dataset folder - self.path = './Data/S3DIS' + self.path = "./Data/S3DIS" # Type of task conducted on this dataset - self.dataset_task = 'cloud_segmentation' + self.dataset_task = "cloud_segmentation" # Update number of class and data task in configuration config.num_classes = self.num_classes - len(self.ignored_labels) @@ -105,23 +106,23 @@ class S3DISDataset(PointCloudDataset): self.use_potentials = use_potentials # Path of the training files - self.train_path = 'original_ply' + self.train_path = "original_ply" # List of files to process ply_path = join(self.path, self.train_path) # Proportion of validation scenes - self.cloud_names = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6'] + self.cloud_names = ["Area_1", "Area_2", "Area_3", "Area_4", "Area_5", "Area_6"] self.all_splits = [0, 1, 2, 3, 4, 5] self.validation_split = 4 # Number of models used per epoch - if self.set == 'training': + if self.set == "training": self.epoch_n = config.epoch_steps * config.batch_num - elif self.set in ['validation', 'test', 'ERF']: + elif self.set in ["validation", "test", "ERF"]: self.epoch_n = config.validation_size * config.batch_num else: - raise ValueError('Unknown set for S3DIS data: ', self.set) + raise ValueError("Unknown set for S3DIS data: ", self.set) # Stop data is not needed if not load_data: @@ -140,24 +141,30 @@ class S3DISDataset(PointCloudDataset): # List of training files self.files = [] for i, f in enumerate(self.cloud_names): - if self.set == 'training': + if self.set == "training": if self.all_splits[i] != self.validation_split: - self.files += [join(ply_path, f + '.ply')] - elif self.set in ['validation', 'test', 'ERF']: + self.files 
+= [join(ply_path, f + ".ply")] + elif self.set in ["validation", "test", "ERF"]: if self.all_splits[i] == self.validation_split: - self.files += [join(ply_path, f + '.ply')] + self.files += [join(ply_path, f + ".ply")] else: - raise ValueError('Unknown set for S3DIS data: ', self.set) + raise ValueError("Unknown set for S3DIS data: ", self.set) - if self.set == 'training': - self.cloud_names = [f for i, f in enumerate(self.cloud_names) - if self.all_splits[i] != self.validation_split] - elif self.set in ['validation', 'test', 'ERF']: - self.cloud_names = [f for i, f in enumerate(self.cloud_names) - if self.all_splits[i] == self.validation_split] + if self.set == "training": + self.cloud_names = [ + f + for i, f in enumerate(self.cloud_names) + if self.all_splits[i] != self.validation_split + ] + elif self.set in ["validation", "test", "ERF"]: + self.cloud_names = [ + f + for i, f in enumerate(self.cloud_names) + if self.all_splits[i] == self.validation_split + ] if 0 < self.config.first_subsampling_dl <= 0.01: - raise ValueError('subsampling_parameter too low (should be over 1 cm') + raise ValueError("subsampling_parameter too low (should be over 1 cm") # Initiate containers self.input_trees = [] @@ -185,20 +192,28 @@ class S3DISDataset(PointCloudDataset): self.min_potentials = [] self.argmin_potentials = [] for i, tree in enumerate(self.pot_trees): - self.potentials += [torch.from_numpy(np.random.rand(tree.data.shape[0]) * 1e-3)] + self.potentials += [ + torch.from_numpy(np.random.rand(tree.data.shape[0]) * 1e-3) + ] min_ind = int(torch.argmin(self.potentials[-1])) self.argmin_potentials += [min_ind] self.min_potentials += [float(self.potentials[-1][min_ind])] # Share potential memory - self.argmin_potentials = torch.from_numpy(np.array(self.argmin_potentials, dtype=np.int64)) - self.min_potentials = torch.from_numpy(np.array(self.min_potentials, dtype=np.float64)) + self.argmin_potentials = torch.from_numpy( + np.array(self.argmin_potentials, dtype=np.int64) + ) + self.min_potentials = torch.from_numpy( + np.array(self.min_potentials, dtype=np.float64) + ) self.argmin_potentials.share_memory_() self.min_potentials.share_memory_() for i, _ in enumerate(self.pot_trees): self.potentials[i].share_memory_() - self.worker_waiting = torch.tensor([0 for _ in range(config.input_threads)], dtype=torch.int32) + self.worker_waiting = torch.tensor( + [0 for _ in range(config.input_threads)], dtype=torch.int32 + ) self.worker_waiting.share_memory_() self.epoch_inds = None self.epoch_i = 0 @@ -207,7 +222,9 @@ class S3DISDataset(PointCloudDataset): self.potentials = None self.min_potentials = None self.argmin_potentials = None - self.epoch_inds = torch.from_numpy(np.zeros((2, self.epoch_n), dtype=np.int64)) + self.epoch_inds = torch.from_numpy( + np.zeros((2, self.epoch_n), dtype=np.int64) + ) self.epoch_i = torch.from_numpy(np.zeros((1,), dtype=np.int64)) self.epoch_i.share_memory_() self.epoch_inds.share_memory_() @@ -215,7 +232,7 @@ class S3DISDataset(PointCloudDataset): self.worker_lock = Lock() # For ERF visualization, we want only one cloud per batch and no randomness - if self.set == 'ERF': + if self.set == "ERF": self.batch_limit = torch.tensor([1], dtype=torch.float32) self.batch_limit.share_memory_() np.random.seed(42) @@ -240,7 +257,6 @@ class S3DISDataset(PointCloudDataset): return self.random_item(batch_i) def potential_item(self, batch_i, debug_workers=False): - t = [time.time()] # Initiate concatanation lists @@ -262,36 +278,34 @@ class S3DISDataset(PointCloudDataset): wid = None while 
True: - t += [time.time()] if debug_workers: - message = '' + message = "" for wi in range(info.num_workers): if wi == wid: - message += ' {:}X{:} '.format(bcolors.FAIL, bcolors.ENDC) + message += " {:}X{:} ".format(bcolors.FAIL, bcolors.ENDC) elif self.worker_waiting[wi] == 0: - message += ' ' + message += " " elif self.worker_waiting[wi] == 1: - message += ' | ' + message += " | " elif self.worker_waiting[wi] == 2: - message += ' o ' + message += " o " print(message) self.worker_waiting[wid] = 0 with self.worker_lock: - if debug_workers: - message = '' + message = "" for wi in range(info.num_workers): if wi == wid: - message += ' {:}v{:} '.format(bcolors.OKGREEN, bcolors.ENDC) + message += " {:}v{:} ".format(bcolors.OKGREEN, bcolors.ENDC) elif self.worker_waiting[wi] == 0: - message += ' ' + message += " " elif self.worker_waiting[wi] == 1: - message += ' | ' + message += " | " elif self.worker_waiting[wi] == 2: - message += ' o ' + message += " o " print(message) self.worker_waiting[wid] = 1 @@ -306,24 +320,28 @@ class S3DISDataset(PointCloudDataset): center_point = pot_points[point_ind, :].reshape(1, -1) # Add a small noise to center point - if self.set != 'ERF': - center_point += np.random.normal(scale=self.config.in_radius / 10, size=center_point.shape) + if self.set != "ERF": + center_point += np.random.normal( + scale=self.config.in_radius / 10, size=center_point.shape + ) # Indices of points in input region - pot_inds, dists = self.pot_trees[cloud_ind].query_radius(center_point, - r=self.config.in_radius, - return_distance=True) + pot_inds, dists = self.pot_trees[cloud_ind].query_radius( + center_point, r=self.config.in_radius, return_distance=True + ) d2s = np.square(dists[0]) pot_inds = pot_inds[0] # Update potentials (Tukey weights) - if self.set != 'ERF': + if self.set != "ERF": tukeys = np.square(1 - d2s / np.square(self.config.in_radius)) tukeys[d2s > np.square(self.config.in_radius)] = 0 self.potentials[cloud_ind][pot_inds] += tukeys min_ind = torch.argmin(self.potentials[cloud_ind]) - self.min_potentials[[cloud_ind]] = self.potentials[cloud_ind][min_ind] + self.min_potentials[[cloud_ind]] = self.potentials[cloud_ind][ + min_ind + ] self.argmin_potentials[[cloud_ind]] = min_ind t += [time.time()] @@ -331,10 +349,10 @@ class S3DISDataset(PointCloudDataset): # Get points from tree structure points = np.array(self.input_trees[cloud_ind].data, copy=False) - # Indices of points in input region - input_inds = self.input_trees[cloud_ind].query_radius(center_point, - r=self.config.in_radius)[0] + input_inds = self.input_trees[cloud_ind].query_radius( + center_point, r=self.config.in_radius + )[0] t += [time.time()] @@ -345,7 +363,9 @@ class S3DISDataset(PointCloudDataset): if n < 2: failed_attempts += 1 if failed_attempts > 100 * self.config.batch_num: - raise ValueError('It seems this dataset only containes empty input spheres') + raise ValueError( + "It seems this dataset only containes empty input spheres" + ) t += [time.time()] t += [time.time()] continue @@ -353,7 +373,7 @@ class S3DISDataset(PointCloudDataset): # Collect labels and colors input_points = (points[input_inds] - center_point).astype(np.float32) input_colors = self.input_colors[cloud_ind][input_inds] - if self.set in ['test', 'ERF']: + if self.set in ["test", "ERF"]: input_labels = np.zeros(input_points.shape[0]) else: input_labels = self.input_labels[cloud_ind][input_inds] @@ -369,7 +389,9 @@ class S3DISDataset(PointCloudDataset): input_colors *= 0 # Get original height as additional feature - input_features = 
np.hstack((input_colors, input_points[:, 2:] + center_point[:, 2:])).astype(np.float32) + input_features = np.hstack( + (input_colors, input_points[:, 2:] + center_point[:, 2:]) + ).astype(np.float32) t += [time.time()] @@ -418,7 +440,9 @@ class S3DISDataset(PointCloudDataset): elif self.config.in_features_dim == 5: stacked_features = np.hstack((stacked_features, features)) else: - raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)') + raise ValueError( + "Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)" + ) ####################### # Create network inputs @@ -430,10 +454,9 @@ class S3DISDataset(PointCloudDataset): t += [time.time()] # Get the whole input list - input_list = self.segmentation_inputs(stacked_points, - stacked_features, - labels, - stack_lengths) + input_list = self.segmentation_inputs( + stacked_points, stacked_features, labels, stack_lengths + ) t += [time.time()] @@ -441,16 +464,16 @@ class S3DISDataset(PointCloudDataset): input_list += [scales, rots, cloud_inds, point_inds, input_inds] if debug_workers: - message = '' + message = "" for wi in range(info.num_workers): if wi == wid: - message += ' {:}0{:} '.format(bcolors.OKBLUE, bcolors.ENDC) + message += " {:}0{:} ".format(bcolors.OKBLUE, bcolors.ENDC) elif self.worker_waiting[wi] == 0: - message += ' ' + message += " " elif self.worker_waiting[wi] == 1: - message += ' | ' + message += " | " elif self.worker_waiting[wi] == 2: - message += ' o ' + message += " o " print(message) self.worker_waiting[wid] = 2 @@ -459,51 +482,65 @@ class S3DISDataset(PointCloudDataset): # Display timings debugT = False if debugT: - print('\n************************\n') - print('Timings:') + print("\n************************\n") + print("Timings:") ti = 0 N = 5 - mess = 'Init ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Init ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Pots ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Pots ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Sphere .... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Sphere .... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Collect ... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Collect ... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Augment ... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Augment ... 
{:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += N * (len(stack_lengths) - 1) + 1 - print('concat .... {:5.1f}ms'.format(1000 * (t[ti+1] - t[ti]))) + print("concat .... {:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('input ..... {:5.1f}ms'.format(1000 * (t[ti+1] - t[ti]))) + print("input ..... {:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('stack ..... {:5.1f}ms'.format(1000 * (t[ti+1] - t[ti]))) + print("stack ..... {:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('\n************************\n') + print("\n************************\n") return input_list def random_item(self, batch_i): - # Initiate concatanation lists p_list = [] f_list = [] @@ -517,9 +554,7 @@ class S3DISDataset(PointCloudDataset): failed_attempts = 0 while True: - with self.worker_lock: - # Get potential minimum cloud_ind = int(self.epoch_inds[0, self.epoch_i]) point_ind = int(self.epoch_inds[1, self.epoch_i]) @@ -528,7 +563,6 @@ class S3DISDataset(PointCloudDataset): self.epoch_i += 1 if self.epoch_i >= int(self.epoch_inds.shape[1]): self.epoch_i -= int(self.epoch_inds.shape[1]) - # Get points from tree structure points = np.array(self.input_trees[cloud_ind].data, copy=False) @@ -537,27 +571,32 @@ class S3DISDataset(PointCloudDataset): center_point = points[point_ind, :].reshape(1, -1) # Add a small noise to center point - if self.set != 'ERF': - center_point += np.random.normal(scale=self.config.in_radius / 10, size=center_point.shape) + if self.set != "ERF": + center_point += np.random.normal( + scale=self.config.in_radius / 10, size=center_point.shape + ) # Indices of points in input region - input_inds = self.input_trees[cloud_ind].query_radius(center_point, - r=self.config.in_radius)[0] + input_inds = self.input_trees[cloud_ind].query_radius( + center_point, r=self.config.in_radius + )[0] # Number collected n = input_inds.shape[0] - + # Safe check for empty spheres if n < 2: failed_attempts += 1 if failed_attempts > 100 * self.config.batch_num: - raise ValueError('It seems this dataset only containes empty input spheres') + raise ValueError( + "It seems this dataset only containes empty input spheres" + ) continue # Collect labels and colors input_points = (points[input_inds] - center_point).astype(np.float32) input_colors = self.input_colors[cloud_ind][input_inds] - if self.set in ['test', 'ERF']: + if self.set in ["test", "ERF"]: input_labels = np.zeros(input_points.shape[0]) else: input_labels = self.input_labels[cloud_ind][input_inds] @@ -571,7 +610,9 @@ class S3DISDataset(PointCloudDataset): input_colors *= 0 # Get original height as additional feature - input_features = np.hstack((input_colors, input_points[:, 2:] + center_point[:, 2:])).astype(np.float32) + input_features = np.hstack( + (input_colors, input_points[:, 2:] + center_point[:, 2:]) + ).astype(np.float32) # Stack batch p_list += [input_points] @@ -618,7 +659,9 @@ class S3DISDataset(PointCloudDataset): elif self.config.in_features_dim == 5: stacked_features = np.hstack((stacked_features, features)) else: - raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)') + raise ValueError( + "Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)" + ) ####################### # Create network inputs @@ -628,10 +671,9 @@ class S3DISDataset(PointCloudDataset): # # Get the 
whole input list - input_list = self.segmentation_inputs(stacked_points, - stacked_features, - labels, - stack_lengths) + input_list = self.segmentation_inputs( + stacked_points, stacked_features, labels, stack_lengths + ) # Add scale and rotation for testing input_list += [scales, rots, cloud_inds, point_inds, input_inds] @@ -639,8 +681,7 @@ class S3DISDataset(PointCloudDataset): return input_list def prepare_S3DIS_ply(self): - - print('\nPreparing ply files') + print("\nPreparing ply files") t0 = time.time() # Folder for the ply files @@ -649,15 +690,18 @@ class S3DISDataset(PointCloudDataset): makedirs(ply_path) for cloud_name in self.cloud_names: - # Pass if the cloud has already been computed - cloud_file = join(ply_path, cloud_name + '.ply') + cloud_file = join(ply_path, cloud_name + ".ply") if exists(cloud_file): continue # Get rooms of the current cloud cloud_folder = join(self.path, cloud_name) - room_folders = [join(cloud_folder, room) for room in listdir(cloud_folder) if isdir(join(cloud_folder, room))] + room_folders = [ + join(cloud_folder, room) + for room in listdir(cloud_folder) + if isdir(join(cloud_folder, room)) + ] # Initiate containers cloud_points = np.empty((0, 3), dtype=np.float32) @@ -666,59 +710,68 @@ class S3DISDataset(PointCloudDataset): # Loop over rooms for i, room_folder in enumerate(room_folders): + print( + "Cloud %s - Room %d/%d : %s" + % (cloud_name, i + 1, len(room_folders), room_folder.split("/")[-1]) + ) - print('Cloud %s - Room %d/%d : %s' % (cloud_name, i+1, len(room_folders), room_folder.split('/')[-1])) - - for object_name in listdir(join(room_folder, 'Annotations')): - - if object_name[-4:] == '.txt': - + for object_name in listdir(join(room_folder, "Annotations")): + if object_name[-4:] == ".txt": # Text file containing point of the object - object_file = join(room_folder, 'Annotations', object_name) + object_file = join(room_folder, "Annotations", object_name) # Object class and ID - tmp = object_name[:-4].split('_')[0] + tmp = object_name[:-4].split("_")[0] if tmp in self.name_to_label: object_class = self.name_to_label[tmp] - elif tmp in ['stairs']: - object_class = self.name_to_label['clutter'] + elif tmp in ["stairs"]: + object_class = self.name_to_label["clutter"] else: - raise ValueError('Unknown object name: ' + str(tmp)) + raise ValueError("Unknown object name: " + str(tmp)) # Correct bug in S3DIS dataset - if object_name == 'ceiling_1.txt': - with open(object_file, 'r') as f: + if object_name == "ceiling_1.txt": + with open(object_file, "r") as f: lines = f.readlines() for l_i, line in enumerate(lines): - if '103.0\x100000' in line: - lines[l_i] = line.replace('103.0\x100000', '103.000000') - with open(object_file, 'w') as f: + if "103.0\x100000" in line: + lines[l_i] = line.replace( + "103.0\x100000", "103.000000" + ) + with open(object_file, "w") as f: f.writelines(lines) # Read object points and colors object_data = np.loadtxt(object_file, dtype=np.float32) # Stack all data - cloud_points = np.vstack((cloud_points, object_data[:, 0:3].astype(np.float32))) - cloud_colors = np.vstack((cloud_colors, object_data[:, 3:6].astype(np.uint8))) - object_classes = np.full((object_data.shape[0], 1), object_class, dtype=np.int32) + cloud_points = np.vstack( + (cloud_points, object_data[:, 0:3].astype(np.float32)) + ) + cloud_colors = np.vstack( + (cloud_colors, object_data[:, 3:6].astype(np.uint8)) + ) + object_classes = np.full( + (object_data.shape[0], 1), object_class, dtype=np.int32 + ) cloud_classes = np.vstack((cloud_classes, 
object_classes)) # Save as ply - write_ply(cloud_file, - (cloud_points, cloud_colors, cloud_classes), - ['x', 'y', 'z', 'red', 'green', 'blue', 'class']) + write_ply( + cloud_file, + (cloud_points, cloud_colors, cloud_classes), + ["x", "y", "z", "red", "green", "blue", "class"], + ) - print('Done in {:.1f}s'.format(time.time() - t0)) + print("Done in {:.1f}s".format(time.time() - t0)) return def load_subsampled_clouds(self): - # Parameter dl = self.config.first_subsampling_dl # Create path for files - tree_path = join(self.path, 'input_{:.3f}'.format(dl)) + tree_path = join(self.path, "input_{:.3f}".format(dl)) if not exists(tree_path): makedirs(tree_path) @@ -727,7 +780,6 @@ class S3DISDataset(PointCloudDataset): ############## for i, file_path in enumerate(self.files): - # Restart timer t0 = time.time() @@ -735,36 +787,43 @@ class S3DISDataset(PointCloudDataset): cloud_name = self.cloud_names[i] # Name of the input files - KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name)) - sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name)) + KDTree_file = join(tree_path, "{:s}_KDTree.pkl".format(cloud_name)) + sub_ply_file = join(tree_path, "{:s}.ply".format(cloud_name)) # Check if inputs have already been computed if exists(KDTree_file): - print('\nFound KDTree for cloud {:s}, subsampled at {:.3f}'.format(cloud_name, dl)) + print( + "\nFound KDTree for cloud {:s}, subsampled at {:.3f}".format( + cloud_name, dl + ) + ) # read ply with data data = read_ply(sub_ply_file) - sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T - sub_labels = data['class'] + sub_colors = np.vstack((data["red"], data["green"], data["blue"])).T + sub_labels = data["class"] # Read pkl with search tree - with open(KDTree_file, 'rb') as f: + with open(KDTree_file, "rb") as f: search_tree = pickle.load(f) else: - print('\nPreparing KDTree for cloud {:s}, subsampled at {:.3f}'.format(cloud_name, dl)) + print( + "\nPreparing KDTree for cloud {:s}, subsampled at {:.3f}".format( + cloud_name, dl + ) + ) # Read ply file data = read_ply(file_path) - points = np.vstack((data['x'], data['y'], data['z'])).T - colors = np.vstack((data['red'], data['green'], data['blue'])).T - labels = data['class'] + points = np.vstack((data["x"], data["y"], data["z"])).T + colors = np.vstack((data["red"], data["green"], data["blue"])).T + labels = data["class"] # Subsample cloud - sub_points, sub_colors, sub_labels = grid_subsampling(points, - features=colors, - labels=labels, - sampleDl=dl) + sub_points, sub_colors, sub_labels = grid_subsampling( + points, features=colors, labels=labels, sampleDl=dl + ) # Rescale float color and squeeze label sub_colors = sub_colors / 255 @@ -772,17 +831,19 @@ class S3DISDataset(PointCloudDataset): # Get chosen neighborhoods search_tree = KDTree(sub_points, leaf_size=10) - #search_tree = nnfln.KDTree(n_neighbors=1, metric='L2', leaf_size=10) - #search_tree.fit(sub_points) + # search_tree = nnfln.KDTree(n_neighbors=1, metric='L2', leaf_size=10) + # search_tree.fit(sub_points) # Save KDTree - with open(KDTree_file, 'wb') as f: + with open(KDTree_file, "wb") as f: pickle.dump(search_tree, f) # Save ply - write_ply(sub_ply_file, - [sub_points, sub_colors, sub_labels], - ['x', 'y', 'z', 'red', 'green', 'blue', 'class']) + write_ply( + sub_ply_file, + [sub_points, sub_colors, sub_labels], + ["x", "y", "z", "red", "green", "blue", "class"], + ) # Fill data containers self.input_trees += [search_tree] @@ -790,7 +851,7 @@ class S3DISDataset(PointCloudDataset): self.input_labels += 
[sub_labels] size = sub_colors.shape[0] * 4 * 7 - print('{:.1f} MB loaded in {:.1f}s'.format(size * 1e-6, time.time() - t0)) + print("{:.1f} MB loaded in {:.1f}s".format(size * 1e-6, time.time() - t0)) ############################ # Coarse potential locations @@ -798,7 +859,7 @@ class S3DISDataset(PointCloudDataset): # Only necessary for validation and test sets if self.use_potentials: - print('\nPreparing potentials') + print("\nPreparing potentials") # Restart timer t0 = time.time() @@ -807,36 +868,39 @@ class S3DISDataset(PointCloudDataset): cloud_ind = 0 for i, file_path in enumerate(self.files): - # Get cloud name cloud_name = self.cloud_names[i] # Name of the input files - coarse_KDTree_file = join(tree_path, '{:s}_coarse_KDTree.pkl'.format(cloud_name)) + coarse_KDTree_file = join( + tree_path, "{:s}_coarse_KDTree.pkl".format(cloud_name) + ) # Check if inputs have already been computed if exists(coarse_KDTree_file): # Read pkl with search tree - with open(coarse_KDTree_file, 'rb') as f: + with open(coarse_KDTree_file, "rb") as f: search_tree = pickle.load(f) else: # Subsample cloud sub_points = np.array(self.input_trees[cloud_ind].data, copy=False) - coarse_points = grid_subsampling(sub_points.astype(np.float32), sampleDl=pot_dl) + coarse_points = grid_subsampling( + sub_points.astype(np.float32), sampleDl=pot_dl + ) # Get chosen neighborhoods search_tree = KDTree(coarse_points, leaf_size=10) # Save KDTree - with open(coarse_KDTree_file, 'wb') as f: + with open(coarse_KDTree_file, "wb") as f: pickle.dump(search_tree, f) # Fill data containers self.pot_trees += [search_tree] cloud_ind += 1 - print('Done in {:.1f}s'.format(time.time() - t0)) + print("Done in {:.1f}s".format(time.time() - t0)) ###################### # Reprojection indices @@ -846,13 +910,11 @@ class S3DISDataset(PointCloudDataset): self.num_clouds = len(self.input_trees) # Only necessary for validation and test sets - if self.set in ['validation', 'test']: - - print('\nPreparing reprojection indices for testing') + if self.set in ["validation", "test"]: + print("\nPreparing reprojection indices for testing") # Get validation/test reprojection indices for i, file_path in enumerate(self.files): - # Restart timer t0 = time.time() @@ -860,29 +922,29 @@ class S3DISDataset(PointCloudDataset): cloud_name = self.cloud_names[i] # File name for saving - proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name)) + proj_file = join(tree_path, "{:s}_proj.pkl".format(cloud_name)) # Try to load previous indices if exists(proj_file): - with open(proj_file, 'rb') as f: + with open(proj_file, "rb") as f: proj_inds, labels = pickle.load(f) else: data = read_ply(file_path) - points = np.vstack((data['x'], data['y'], data['z'])).T - labels = data['class'] + points = np.vstack((data["x"], data["y"], data["z"])).T + labels = data["class"] # Compute projection inds idxs = self.input_trees[i].query(points, return_distance=False) - #dists, idxs = self.input_trees[i_cloud].kneighbors(points) + # dists, idxs = self.input_trees[i_cloud].kneighbors(points) proj_inds = np.squeeze(idxs).astype(np.int32) # Save - with open(proj_file, 'wb') as f: + with open(proj_file, "wb") as f: pickle.dump([proj_inds, labels], f) self.test_proj += [proj_inds] self.validation_labels += [labels] - print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0)) + print("{:s} done in {:.1f}s".format(cloud_name, time.time() - t0)) print() return @@ -894,7 +956,7 @@ class S3DISDataset(PointCloudDataset): # Get original points data = read_ply(file_path) - return 
np.vstack((data['x'], data['y'], data['z'])).T + return np.vstack((data["x"], data["y"], data["z"])).T # ---------------------------------------------------------------------------------------------------------------------- @@ -913,7 +975,7 @@ class S3DISSampler(Sampler): self.dataset = dataset # Number of step per epoch - if dataset.set == 'training': + if dataset.set == "training": self.N = dataset.config.epoch_steps else: self.N = dataset.config.validation_size @@ -927,7 +989,6 @@ class S3DISSampler(Sampler): """ if not self.dataset.use_potentials: - # Initiate current epoch ind self.dataset.epoch_i *= 0 self.dataset.epoch_inds *= 0 @@ -940,15 +1001,23 @@ class S3DISSampler(Sampler): random_pick_n = int(np.ceil(num_centers / self.dataset.config.num_classes)) # Choose random points of each class for each cloud - epoch_indices = np.zeros((2, 0), dtype=np.int64) + np.zeros((2, 0), dtype=np.int64) for label_ind, label in enumerate(self.dataset.label_values): if label not in self.dataset.ignored_labels: - - # Gather indices of the points with this label in all the input clouds + # Gather indices of the points with this label in all the input clouds all_label_indices = [] for cloud_ind, cloud_labels in enumerate(self.dataset.input_labels): label_indices = np.where(np.equal(cloud_labels, label))[0] - all_label_indices.append(np.vstack((np.full(label_indices.shape, cloud_ind, dtype=np.int64), label_indices))) + all_label_indices.append( + np.vstack( + ( + np.full( + label_indices.shape, cloud_ind, dtype=np.int64 + ), + label_indices, + ) + ) + ) # Stack them: [2, N1+N2+...] all_label_indices = np.hstack(all_label_indices) @@ -958,23 +1027,40 @@ class S3DISSampler(Sampler): if N_inds < random_pick_n: chosen_label_inds = np.zeros((2, 0), dtype=np.int64) while chosen_label_inds.shape[1] < random_pick_n: - chosen_label_inds = np.hstack((chosen_label_inds, all_label_indices[:, np.random.permutation(N_inds)])) - warnings.warn('When choosing random epoch indices (use_potentials=False), \ + chosen_label_inds = np.hstack( + ( + chosen_label_inds, + all_label_indices[:, np.random.permutation(N_inds)], + ) + ) + warnings.warn( + "When choosing random epoch indices (use_potentials=False), \ class {:d}: {:s} only had {:d} available points, while we \ - needed {:d}. Repeating indices in the same epoch'.format(label, - self.dataset.label_names[label_ind], - N_inds, - random_pick_n)) + needed {:d}. 
Repeating indices in the same epoch".format( + label, + self.dataset.label_names[label_ind], + N_inds, + random_pick_n, + ) + ) elif N_inds < 50 * random_pick_n: - rand_inds = np.random.choice(N_inds, size=random_pick_n, replace=False) + rand_inds = np.random.choice( + N_inds, size=random_pick_n, replace=False + ) chosen_label_inds = all_label_indices[:, rand_inds] else: chosen_label_inds = np.zeros((2, 0), dtype=np.int64) while chosen_label_inds.shape[1] < random_pick_n: - rand_inds = np.unique(np.random.choice(N_inds, size=2*random_pick_n, replace=True)) - chosen_label_inds = np.hstack((chosen_label_inds, all_label_indices[:, rand_inds])) + rand_inds = np.unique( + np.random.choice( + N_inds, size=2 * random_pick_n, replace=True + ) + ) + chosen_label_inds = np.hstack( + (chosen_label_inds, all_label_indices[:, rand_inds]) + ) chosen_label_inds = chosen_label_inds[:, :random_pick_n] # Stack for each label @@ -1025,7 +1111,6 @@ class S3DISSampler(Sampler): for epoch in range(10): for i, test in enumerate(self): - # New time t = t[-1:] t += [time.time()] @@ -1064,17 +1149,23 @@ class S3DISSampler(Sampler): # Console display (only one per second) if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}, // {:.1f}ms {:.1f}ms' - print(message.format(i, - estim_b, - int(self.dataset.batch_limit), - 1000 * mean_dt[0], - 1000 * mean_dt[1])) + message = "Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}, // {:.1f}ms {:.1f}ms" + print( + message.format( + i, + estim_b, + int(self.dataset.batch_limit), + 1000 * mean_dt[0], + 1000 * mean_dt[1], + ) + ) if breaking: break - def calibration(self, dataloader, untouched_ratio=0.9, verbose=False, force_redo=False): + def calibration( + self, dataloader, untouched_ratio=0.9, verbose=False, force_redo=False + ): """ Method performing batch and neighbors calibration. 
Batch calibration: Set "batch_limit" (the maximum number of points allowed in every batch) so that the @@ -1087,7 +1178,7 @@ class S3DISSampler(Sampler): # Previously saved calibration ############################## - print('\nStarting Calibration (use verbose=True for more details)') + print("\nStarting Calibration (use verbose=True for more details)") t0 = time.time() redo = force_redo @@ -1096,45 +1187,47 @@ class S3DISSampler(Sampler): # *********** # Load batch_limit dictionary - batch_lim_file = join(self.dataset.path, 'batch_limits.pkl') + batch_lim_file = join(self.dataset.path, "batch_limits.pkl") if exists(batch_lim_file): - with open(batch_lim_file, 'rb') as file: + with open(batch_lim_file, "rb") as file: batch_lim_dict = pickle.load(file) else: batch_lim_dict = {} # Check if the batch limit associated with current parameters exists if self.dataset.use_potentials: - sampler_method = 'potentials' + sampler_method = "potentials" else: - sampler_method = 'random' - key = '{:s}_{:.3f}_{:.3f}_{:d}'.format(sampler_method, - self.dataset.config.in_radius, - self.dataset.config.first_subsampling_dl, - self.dataset.config.batch_num) + sampler_method = "random" + key = "{:s}_{:.3f}_{:.3f}_{:d}".format( + sampler_method, + self.dataset.config.in_radius, + self.dataset.config.first_subsampling_dl, + self.dataset.config.batch_num, + ) if not redo and key in batch_lim_dict: self.dataset.batch_limit[0] = batch_lim_dict[key] else: redo = True if verbose: - print('\nPrevious calibration found:') - print('Check batch limit dictionary') + print("\nPrevious calibration found:") + print("Check batch limit dictionary") if key in batch_lim_dict: color = bcolors.OKGREEN v = str(int(batch_lim_dict[key])) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" + print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) # Neighbors limit # *************** # Load neighb_limits dictionary - neighb_lim_file = join(self.dataset.path, 'neighbors_limits.pkl') + neighb_lim_file = join(self.dataset.path, "neighbors_limits.pkl") if exists(neighb_lim_file): - with open(neighb_lim_file, 'rb') as file: + with open(neighb_lim_file, "rb") as file: neighb_lim_dict = pickle.load(file) else: neighb_lim_dict = {} @@ -1142,14 +1235,13 @@ class S3DISSampler(Sampler): # Check if the limit associated with current parameters exists (for each layer) neighb_limits = [] for layer_ind in range(self.dataset.config.num_layers): - dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) if key in neighb_lim_dict: neighb_limits += [neighb_lim_dict[key]] @@ -1159,34 +1251,37 @@ class S3DISSampler(Sampler): redo = True if verbose: - print('Check neighbors limit dictionary') + print("Check neighbors limit dictionary") for layer_ind in range(self.dataset.config.num_layers): dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) if key in neighb_lim_dict: color = bcolors.OKGREEN v = str(neighb_lim_dict[key]) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" 
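# --- Illustration, not part of the patch: a minimal numpy sketch of how the
# neighborhood limits are extracted from the histograms collected further down
# in this method (cumulative sum thresholded at untouched_ratio). The histogram
# values below are synthetic.
import numpy as np

untouched_ratio = 0.9
# Rows = network layers, columns = neighborhood sizes (counts of neighborhoods).
neighb_hists = np.array([[0, 5, 20, 40, 25, 8, 2],
                         [1, 10, 30, 30, 20, 7, 2]], dtype=np.int32)
hist_n = neighb_hists.shape[1]

cumsum = np.cumsum(neighb_hists.T, axis=0)
# Smallest neighborhood size covering `untouched_ratio` of all samples, per layer.
percentiles = np.sum(cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0)
print(percentiles)  # -> [4 4]; longer neighbor lists get cropped to this limit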
+ print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) if redo: - ############################ # Neighbors calib parameters ############################ # From config parameter, compute higher bound of neighbors number in a neighborhood - hist_n = int(np.ceil(4 / 3 * np.pi * (self.dataset.config.deform_radius + 1) ** 3)) + hist_n = int( + np.ceil(4 / 3 * np.pi * (self.dataset.config.deform_radius + 1) ** 3) + ) # Histogram of neighborhood sizes - neighb_hists = np.zeros((self.dataset.config.num_layers, hist_n), dtype=np.int32) + neighb_hists = np.zeros( + (self.dataset.config.num_layers, hist_n), dtype=np.int32 + ) ######################## # Batch calib parameters @@ -1195,12 +1290,12 @@ class S3DISSampler(Sampler): # Estimated average batch size and target value estim_b = 0 target_b = self.dataset.config.batch_num - + # Expected batch size order of magnitude expected_N = 100000 # Calibration parameters. Higher means faster but can also become unstable - # Reduce Kp and Kd if your GP Uis small as the total number of points per batch will be smaller + # Reduce Kp and Kd if your GP Uis small as the total number of points per batch will be smaller low_pass_T = 100 Kp = expected_N / 200 Ki = 0.001 * Kp @@ -1229,13 +1324,15 @@ class S3DISSampler(Sampler): # Perform calibration ##################### - # number of batch per epoch + # number of batch per epoch sample_batches = 999 for epoch in range((sample_batches // self.N) + 1): for batch_i, batch in enumerate(dataloader): - # Update neighborhood histogram - counts = [np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) for neighb_mat in batch.neighbors] + counts = [ + np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) + for neighb_mat in batch.neighbors + ] hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts] neighb_hists += np.vstack(hists) @@ -1251,7 +1348,6 @@ class S3DISSampler(Sampler): error_D = error - last_error last_error = error - # Save smooth errors for convergene check smooth_errors.append(target_b - estim_b) if len(smooth_errors) > 30: @@ -1283,10 +1379,8 @@ class S3DISSampler(Sampler): # Console display (only one per second) if verbose and (t - last_display) > 1.0: last_display = t - message = 'Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}' - print(message.format(i, - estim_b, - int(self.dataset.batch_limit))) + message = "Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}" + print(message.format(i, estim_b, int(self.dataset.batch_limit))) # Debug plots debug_in.append(int(batch.points[0].shape[0])) @@ -1301,7 +1395,9 @@ class S3DISSampler(Sampler): if not breaking: import matplotlib.pyplot as plt - print("ERROR: It seems that the calibration have not reached convergence. Here are some plot to understand why:") + print( + "ERROR: It seems that the calibration have not reached convergence. 
Here are some plot to understand why:" + ) print("If you notice unstability, reduce the expected_N value") print("If convergece is too slow, increase the expected_N value") @@ -1315,71 +1411,69 @@ class S3DISSampler(Sampler): plt.show() - a = 1/0 - - # Use collected neighbor histogram to get neighbors limit cumsum = np.cumsum(neighb_hists.T, axis=0) - percentiles = np.sum(cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0) + percentiles = np.sum( + cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0 + ) self.dataset.neighborhood_limits = percentiles - if verbose: - # Crop histogram while np.sum(neighb_hists[:, -1]) == 0: neighb_hists = neighb_hists[:, :-1] hist_n = neighb_hists.shape[1] - print('\n**************************************************\n') - line0 = 'neighbors_num ' + print("\n**************************************************\n") + line0 = "neighbors_num " for layer in range(neighb_hists.shape[0]): - line0 += '| layer {:2d} '.format(layer) + line0 += "| layer {:2d} ".format(layer) print(line0) for neighb_size in range(hist_n): - line0 = ' {:4d} '.format(neighb_size) + line0 = " {:4d} ".format(neighb_size) for layer in range(neighb_hists.shape[0]): if neighb_size > percentiles[layer]: color = bcolors.FAIL else: color = bcolors.OKGREEN - line0 += '|{:}{:10d}{:} '.format(color, - neighb_hists[layer, neighb_size], - bcolors.ENDC) + line0 += "|{:}{:10d}{:} ".format( + color, neighb_hists[layer, neighb_size], bcolors.ENDC + ) print(line0) - print('\n**************************************************\n') - print('\nchosen neighbors limits: ', percentiles) + print("\n**************************************************\n") + print("\nchosen neighbors limits: ", percentiles) print() # Save batch_limit dictionary if self.dataset.use_potentials: - sampler_method = 'potentials' + sampler_method = "potentials" else: - sampler_method = 'random' - key = '{:s}_{:.3f}_{:.3f}_{:d}'.format(sampler_method, - self.dataset.config.in_radius, - self.dataset.config.first_subsampling_dl, - self.dataset.config.batch_num) + sampler_method = "random" + key = "{:s}_{:.3f}_{:.3f}_{:d}".format( + sampler_method, + self.dataset.config.in_radius, + self.dataset.config.first_subsampling_dl, + self.dataset.config.batch_num, + ) batch_lim_dict[key] = float(self.dataset.batch_limit) - with open(batch_lim_file, 'wb') as file: + with open(batch_lim_file, "wb") as file: pickle.dump(batch_lim_dict, file) # Save neighb_limit dictionary for layer_ind in range(self.dataset.config.num_layers): - dl = self.dataset.config.first_subsampling_dl * (2 ** layer_ind) + dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:.3f}_{:.3f}'.format(dl, r) + key = "{:.3f}_{:.3f}".format(dl, r) neighb_lim_dict[key] = self.dataset.neighborhood_limits[layer_ind] - with open(neighb_lim_file, 'wb') as file: + with open(neighb_lim_file, "wb") as file: pickle.dump(neighb_lim_dict, file) - - print('Calibration done in {:.1f}s\n'.format(time.time() - t0)) + print("Calibration done in {:.1f}s\n".format(time.time() - t0)) return @@ -1387,7 +1481,6 @@ class S3DISCustomBatch: """Custom batch definition with memory pinning for S3DIS""" def __init__(self, input_list): - # Get rid of batch dimension input_list = input_list[0] @@ -1396,15 +1489,25 @@ class S3DISCustomBatch: # Extract input tensors from the list of numpy array ind = 0 - self.points = [torch.from_numpy(nparray) for 
nparray in input_list[ind:ind+L]] + self.points = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.neighbors = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.neighbors = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.pools = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.pools = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.upsamples = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.upsamples = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.lengths = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.lengths = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L self.features = torch.from_numpy(input_list[ind]) ind += 1 @@ -1443,7 +1546,6 @@ class S3DISCustomBatch: return self def to(self, device): - self.points = [in_tensor.to(device) for in_tensor in self.points] self.neighbors = [in_tensor.to(device) for in_tensor in self.neighbors] self.pools = [in_tensor.to(device) for in_tensor in self.pools] @@ -1461,15 +1563,15 @@ class S3DISCustomBatch: def unstack_points(self, layer=None): """Unstack the points""" - return self.unstack_elements('points', layer) + return self.unstack_elements("points", layer) def unstack_neighbors(self, layer=None): """Unstack the neighbors indices""" - return self.unstack_elements('neighbors', layer) + return self.unstack_elements("neighbors", layer) def unstack_pools(self, layer=None): """Unstack the pooling indices""" - return self.unstack_elements('pools', layer) + return self.unstack_elements("pools", layer) def unstack_elements(self, element_name, layer=None, to_numpy=True): """ @@ -1477,34 +1579,31 @@ class S3DISCustomBatch: layers """ - if element_name == 'points': + if element_name == "points": elements = self.points - elif element_name == 'neighbors': + elif element_name == "neighbors": elements = self.neighbors - elif element_name == 'pools': + elif element_name == "pools": elements = self.pools[:-1] else: - raise ValueError('Unknown element name: {:s}'.format(element_name)) + raise ValueError("Unknown element name: {:s}".format(element_name)) all_p_list = [] for layer_i, layer_elems in enumerate(elements): - if layer is None or layer == layer_i: - i0 = 0 p_list = [] - if element_name == 'pools': - lengths = self.lengths[layer_i+1] + if element_name == "pools": + lengths = self.lengths[layer_i + 1] else: lengths = self.lengths[layer_i] for b_i, length in enumerate(lengths): - - elem = layer_elems[i0:i0 + length] - if element_name == 'neighbors': + elem = layer_elems[i0 : i0 + length] + if element_name == "neighbors": elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= 0] -= i0 - elif element_name == 'pools': + elif element_name == "pools": elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= 0] -= torch.sum(self.lengths[layer_i][:b_i]) i0 += length @@ -1535,16 +1634,13 @@ def S3DISCollate(batch_data): def debug_upsampling(dataset, loader): """Shows which labels are sampled according to strategy chosen""" - for epoch in range(10): - for batch_i, batch in enumerate(loader): - pc1 = batch.points[1].numpy() pc2 = batch.points[2].numpy() up1 = batch.upsamples[1].numpy() - print(pc1.shape, '=>', pc2.shape) + print(pc1.shape, "=>", pc2.shape) print(up1.shape, np.max(up1)) pc2 = np.vstack((pc2, np.zeros_like(pc2[:1, :]))) @@ -1553,14 +1649,14 @@ 
def debug_upsampling(dataset, loader): p0 = pc1[10, :] neighbs0 = up1[10, :] neighbs0 = pc2[neighbs0, :] - p0 - d2 = np.sum(neighbs0 ** 2, axis=1) + d2 = np.sum(neighbs0**2, axis=1) print(neighbs0.shape) print(neighbs0[:5]) print(d2[:5]) - print('******************') - print('*******************************************') + print("******************") + print("*******************************************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -1576,7 +1672,6 @@ def debug_timing(dataset, loader): estim_N = 0 for epoch in range(10): - for batch_i, batch in enumerate(loader): # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) @@ -1598,57 +1693,49 @@ def debug_timing(dataset, loader): # Console display (only one per second) if (t[-1] - last_display) > -1.0: last_display = t[-1] - message = 'Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f} - {:.0f}' - print(message.format(batch_i, - 1000 * mean_dt[0], - 1000 * mean_dt[1], - estim_b, - estim_N)) + message = "Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f} - {:.0f}" + print( + message.format( + batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1], estim_b, estim_N + ) + ) - print('************* Epoch ended *************') + print("************* Epoch ended *************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) def debug_show_clouds(dataset, loader): - - for epoch in range(10): - - clouds = [] - cloud_normals = [] - cloud_labels = [] - L = dataset.config.num_layers for batch_i, batch in enumerate(loader): - # Print characteristics of input tensors - print('\nPoints tensors') + print("\nPoints tensors") for i in range(L): print(batch.points[i].dtype, batch.points[i].shape) - print('\nNeigbors tensors') + print("\nNeigbors tensors") for i in range(L): print(batch.neighbors[i].dtype, batch.neighbors[i].shape) - print('\nPools tensors') + print("\nPools tensors") for i in range(L): print(batch.pools[i].dtype, batch.pools[i].shape) - print('\nStack lengths') + print("\nStack lengths") for i in range(L): print(batch.lengths[i].dtype, batch.lengths[i].shape) - print('\nFeatures') + print("\nFeatures") print(batch.features.dtype, batch.features.shape) - print('\nLabels') + print("\nLabels") print(batch.labels.dtype, batch.labels.shape) - print('\nAugment Scales') + print("\nAugment Scales") print(batch.scales.dtype, batch.scales.shape) - print('\nAugment Rotations') + print("\nAugment Rotations") print(batch.rots.dtype, batch.rots.shape) - print('\nModel indices') + print("\nModel indices") print(batch.model_inds.dtype, batch.model_inds.shape) - print('\nAre input tensors pinned') + print("\nAre input tensors pinned") print(batch.neighbors[0].is_pinned()) print(batch.neighbors[-1].is_pinned()) print(batch.points[0].is_pinned()) @@ -1660,7 +1747,7 @@ def debug_show_clouds(dataset, loader): show_input_batch(batch) - print('*******************************************') + print("*******************************************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -1674,7 +1761,6 @@ def debug_batch_and_neighbors_calib(dataset, loader): mean_dt = np.zeros(2) for epoch in range(10): - for batch_i, input_list in enumerate(loader): # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) @@ -1692,12 +1778,10 @@ def debug_batch_and_neighbors_calib(dataset, loader): # Console display (only one per second) if (t[-1] - last_display) > 1.0: last_display = t[-1] - message 
= 'Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} ' - print(message.format(batch_i, - 1000 * mean_dt[0], - 1000 * mean_dt[1])) + message = "Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} " + print(message.format(batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1])) - print('************* Epoch ended *************') + print("************* Epoch ended *************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) diff --git a/datasetss/SemanticKitti.py b/datasetss/SemanticKitti.py index dcc6f46..5fbd97a 100644 --- a/datasetss/SemanticKitti.py +++ b/datasetss/SemanticKitti.py @@ -54,37 +54,39 @@ from utils.config import bcolors class SemanticKittiDataset(PointCloudDataset): """Class to handle SemanticKitti dataset.""" - def __init__(self, config, set='training', balance_classes=True): - PointCloudDataset.__init__(self, 'SemanticKitti') + def __init__(self, config, set="training", balance_classes=True): + PointCloudDataset.__init__(self, "SemanticKitti") ########################## # Parameters for the files ########################## # Dataset folder - self.path = './Data/SemanticKitti' + self.path = "./Data/SemanticKitti" # Type of task conducted on this dataset - self.dataset_task = 'slam_segmentation' + self.dataset_task = "slam_segmentation" # Training or test set self.set = set # Get a list of sequences - if self.set == 'training': - self.sequences = ['{:02d}'.format(i) for i in range(11) if i != 8] - elif self.set == 'validation': - self.sequences = ['{:02d}'.format(i) for i in range(11) if i == 8] - elif self.set == 'test': - self.sequences = ['{:02d}'.format(i) for i in range(11, 22)] + if self.set == "training": + self.sequences = ["{:02d}".format(i) for i in range(11) if i != 8] + elif self.set == "validation": + self.sequences = ["{:02d}".format(i) for i in range(11) if i == 8] + elif self.set == "test": + self.sequences = ["{:02d}".format(i) for i in range(11, 22)] else: - raise ValueError('Unknown set for SemanticKitti data: ', self.set) + raise ValueError("Unknown set for SemanticKitti data: ", self.set) # List all files in each sequence self.frames = [] for seq in self.sequences: - velo_path = join(self.path, 'sequences', seq, 'velodyne') - frames = np.sort([vf[:-4] for vf in listdir(velo_path) if vf.endswith('.bin')]) + velo_path = join(self.path, "sequences", seq, "velodyne") + frames = np.sort( + [vf[:-4] for vf in listdir(velo_path) if vf.endswith(".bin")] + ) self.frames.append(frames) ########################### @@ -93,22 +95,26 @@ class SemanticKittiDataset(PointCloudDataset): # Read labels if config.n_frames == 1: - config_file = join(self.path, 'semantic-kitti.yaml') + config_file = join(self.path, "semantic-kitti.yaml") elif config.n_frames > 1: - config_file = join(self.path, 'semantic-kitti-all.yaml') + config_file = join(self.path, "semantic-kitti-all.yaml") else: - raise ValueError('number of frames has to be >= 1') + raise ValueError("number of frames has to be >= 1") - with open(config_file, 'r') as stream: + with open(config_file, "r") as stream: doc = yaml.safe_load(stream) - all_labels = doc['labels'] - learning_map_inv = doc['learning_map_inv'] - learning_map = doc['learning_map'] - self.learning_map = np.zeros((np.max([k for k in learning_map.keys()]) + 1), dtype=np.int32) + all_labels = doc["labels"] + learning_map_inv = doc["learning_map_inv"] + learning_map = doc["learning_map"] + self.learning_map = np.zeros( + (np.max([k for k in learning_map.keys()]) + 1), dtype=np.int32 + ) for k, v in 
learning_map.items(): self.learning_map[k] = v - self.learning_map_inv = np.zeros((np.max([k for k in learning_map_inv.keys()]) + 1), dtype=np.int32) + self.learning_map_inv = np.zeros( + (np.max([k for k in learning_map_inv.keys()]) + 1), dtype=np.int32 + ) for k, v in learning_map_inv.items(): self.learning_map_inv[k] = v @@ -157,14 +163,16 @@ class SemanticKittiDataset(PointCloudDataset): self.batch_limit.share_memory_() # Initialize frame potentials - self.potentials = torch.from_numpy(np.random.rand(self.all_inds.shape[0]) * 0.1 + 0.1) + self.potentials = torch.from_numpy( + np.random.rand(self.all_inds.shape[0]) * 0.1 + 0.1 + ) self.potentials.share_memory_() # If true, the same amount of frames is picked per class self.balance_classes = balance_classes # Choose batch_num in_R and max_in_p depending on validation or training - if self.set == 'training': + if self.set == "training": self.batch_num = config.batch_num self.max_in_p = config.max_in_points self.in_R = config.in_radius @@ -174,7 +182,7 @@ class SemanticKittiDataset(PointCloudDataset): self.in_R = config.val_radius # shared epoch indices and classes (in case we want class balanced sampler) - if set == 'training': + if set == "training": N = int(np.ceil(config.epoch_steps * self.batch_num * 1.1)) else: N = int(np.ceil(config.validation_size * self.batch_num * 1.1)) @@ -185,7 +193,9 @@ class SemanticKittiDataset(PointCloudDataset): self.epoch_inds.share_memory_() self.epoch_labels.share_memory_() - self.worker_waiting = torch.tensor([0 for _ in range(config.input_threads)], dtype=torch.int32) + self.worker_waiting = torch.tensor( + [0 for _ in range(config.input_threads)], dtype=torch.int32 + ) self.worker_waiting.share_memory_() self.worker_lock = Lock() @@ -219,11 +229,9 @@ class SemanticKittiDataset(PointCloudDataset): batch_n = 0 while True: - t += [time.time()] with self.worker_lock: - # Get potential minimum ind = int(self.epoch_inds[self.epoch_i]) wanted_label = int(self.epoch_labels[self.epoch_i]) @@ -260,7 +268,6 @@ class SemanticKittiDataset(PointCloudDataset): num_merged = 0 f_inc = 0 while num_merged < self.config.n_frames and f_ind - f_inc >= 0: - # Current frame pose pose = self.poses[s_ind][f_ind - f_inc] @@ -273,18 +280,22 @@ class SemanticKittiDataset(PointCloudDataset): continue # Path of points and labels - seq_path = join(self.path, 'sequences', self.sequences[s_ind]) - velo_file = join(seq_path, 'velodyne', self.frames[s_ind][f_ind - f_inc] + '.bin') - if self.set == 'test': + seq_path = join(self.path, "sequences", self.sequences[s_ind]) + velo_file = join( + seq_path, "velodyne", self.frames[s_ind][f_ind - f_inc] + ".bin" + ) + if self.set == "test": label_file = None else: - label_file = join(seq_path, 'labels', self.frames[s_ind][f_ind - f_inc] + '.label') + label_file = join( + seq_path, "labels", self.frames[s_ind][f_ind - f_inc] + ".label" + ) # Read points frame_points = np.fromfile(velo_file, dtype=np.float32) points = frame_points.reshape((-1, 4)) - if self.set == 'test': + if self.set == "test": # Fake labels sem_labels = np.zeros((frame_points.shape[0],), dtype=np.int32) else: @@ -295,25 +306,29 @@ class SemanticKittiDataset(PointCloudDataset): # Apply pose (without np.dot to avoid multi-threading) hpoints = np.hstack((points[:, :3], np.ones_like(points[:, :1]))) - #new_points = hpoints.dot(pose.T) + # new_points = hpoints.dot(pose.T) new_points = np.sum(np.expand_dims(hpoints, 2) * pose.T, axis=1) - #new_points[:, 3:] = points[:, 3:] + # new_points[:, 3:] = points[:, 3:] # In case of 
validation, keep the original points in memory - if self.set in ['validation', 'test'] and f_inc == 0: + if self.set in ["validation", "test"] and f_inc == 0: o_pts = new_points[:, :3].astype(np.float32) o_labels = sem_labels.astype(np.int32) # In case radius smaller than 50m, chose new center on a point of the wanted class or not if self.in_R < 50.0 and f_inc == 0: if self.balance_classes: - wanted_ind = np.random.choice(np.where(sem_labels == wanted_label)[0]) + wanted_ind = np.random.choice( + np.where(sem_labels == wanted_label)[0] + ) else: wanted_ind = np.random.choice(new_points.shape[0]) p0 = new_points[wanted_ind, :3] # Eliminate points further than config.in_radius - mask = np.sum(np.square(new_points[:, :3] - p0), axis=1) < self.in_R ** 2 + mask = ( + np.sum(np.square(new_points[:, :3] - p0), axis=1) < self.in_R**2 + ) mask_inds = np.where(mask)[0].astype(np.int32) # Shuffle points @@ -328,7 +343,9 @@ class SemanticKittiDataset(PointCloudDataset): # We have to project in the first frame coordinates new_coords = new_points - pose0[:3, 3] # new_coords = new_coords.dot(pose0[:3, :3]) - new_coords = np.sum(np.expand_dims(new_coords, 2) * pose0[:3, :3], axis=1) + new_coords = np.sum( + np.expand_dims(new_coords, 2) * pose0[:3, :3], axis=1 + ) new_coords = np.hstack((new_coords, points[rand_order, 3:])) # Increment merge count @@ -345,10 +362,12 @@ class SemanticKittiDataset(PointCloudDataset): ######################### # Subsample merged frames - in_pts, in_fts, in_lbls = grid_subsampling(merged_points, - features=merged_coords, - labels=merged_labels, - sampleDl=self.config.first_subsampling_dl) + in_pts, in_fts, in_lbls = grid_subsampling( + merged_points, + features=merged_coords, + labels=merged_labels, + sampleDl=self.config.first_subsampling_dl, + ) t += [time.time()] @@ -370,15 +389,16 @@ class SemanticKittiDataset(PointCloudDataset): t += [time.time()] # Before augmenting, compute reprojection inds (only for validation and test) - if self.set in ['validation', 'test']: - + if self.set in ["validation", "test"]: # get val_points that are in range radiuses = np.sum(np.square(o_pts - p0), axis=1) reproj_mask = radiuses < (0.99 * self.in_R) ** 2 # Project predictions on the frame points search_tree = KDTree(in_pts, leaf_size=50) - proj_inds = search_tree.query(o_pts[reproj_mask, :], return_distance=False) + proj_inds = search_tree.query( + o_pts[reproj_mask, :], return_distance=False + ) proj_inds = np.squeeze(proj_inds).astype(np.int32) else: proj_inds = np.zeros((0,)) @@ -446,7 +466,9 @@ class SemanticKittiDataset(PointCloudDataset): # Use all coordinates + reflectance stacked_features = np.hstack((stacked_features, features)) else: - raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)') + raise ValueError( + "Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)" + ) t += [time.time()] @@ -458,86 +480,120 @@ class SemanticKittiDataset(PointCloudDataset): # # Get the whole input list - input_list = self.segmentation_inputs(stacked_points, - stacked_features, - labels.astype(np.int64), - stack_lengths) + input_list = self.segmentation_inputs( + stacked_points, stacked_features, labels.astype(np.int64), stack_lengths + ) t += [time.time()] # Add scale and rotation for testing - input_list += [scales, rots, frame_inds, frame_centers, r_inds_list, r_mask_list, val_labels_list] + input_list += [ + scales, + rots, + frame_inds, + frame_centers, + r_inds_list, + r_mask_list, + val_labels_list, + ] t += [time.time()] # Display timings 
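# --- Illustrative sketch, not part of the patch ----------------------------------
# __getitem__ above applies the frame pose with
#     np.sum(np.expand_dims(hpoints, 2) * pose.T, axis=1)
# instead of hpoints.dot(pose.T), so that BLAS does not spawn extra threads inside
# the dataloader workers. The two expressions are numerically equivalent; a quick
# self-contained check on hypothetical data (not taken from the dataset):
import numpy as np

def apply_pose_broadcast(hpoints, pose):
    # (N, 4, 1) * (4, 4) -> (N, 4, 4), summed over the middle axis -> (N, 4)
    return np.sum(np.expand_dims(hpoints, 2) * pose.T, axis=1)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pts = rng.random((5, 3)).astype(np.float32)
    hpts = np.hstack((pts, np.ones_like(pts[:, :1])))   # homogeneous coordinates
    pose = np.eye(4, dtype=np.float32)
    pose[:3, 3] = [1.0, 2.0, 3.0]                       # arbitrary translation
    assert np.allclose(apply_pose_broadcast(hpts, pose), hpts.dot(pose.T))
# ----------------------------------------------------------------------------------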
debugT = False if debugT: - print('\n************************\n') - print('Timings:') + print("\n************************\n") + print("Timings:") ti = 0 N = 9 - mess = 'Init ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Init ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Lock ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Lock ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Init ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Init ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Load ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Load ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Subs ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Subs ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Drop ...... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Drop ...... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Reproj .... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Reproj .... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Augment ... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Augment ... {:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += 1 - mess = 'Stack ..... {:5.1f}ms /' - loop_times = [1000 * (t[ti + N * i + 1] - t[ti + N * i]) for i in range(len(stack_lengths))] + mess = "Stack ..... 
{:5.1f}ms /" + loop_times = [ + 1000 * (t[ti + N * i + 1] - t[ti + N * i]) + for i in range(len(stack_lengths)) + ] for dt in loop_times: - mess += ' {:5.1f}'.format(dt) + mess += " {:5.1f}".format(dt) print(mess.format(np.sum(loop_times))) ti += N * (len(stack_lengths) - 1) + 1 - print('concat .... {:5.1f}ms'.format(1000 * (t[ti+1] - t[ti]))) + print("concat .... {:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('input ..... {:5.1f}ms'.format(1000 * (t[ti+1] - t[ti]))) + print("input ..... {:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('stack ..... {:5.1f}ms'.format(1000 * (t[ti+1] - t[ti]))) + print("stack ..... {:5.1f}ms".format(1000 * (t[ti + 1] - t[ti]))) ti += 1 - print('\n************************\n') + print("\n************************\n") return [self.config.num_layers] + input_list @@ -555,24 +611,31 @@ class SemanticKittiDataset(PointCloudDataset): self.poses = [] for seq in self.sequences: - - seq_folder = join(self.path, 'sequences', seq) + seq_folder = join(self.path, "sequences", seq) # Read Calib - self.calibrations.append(self.parse_calibration(join(seq_folder, "calib.txt"))) + self.calibrations.append( + self.parse_calibration(join(seq_folder, "calib.txt")) + ) # Read times - self.times.append(np.loadtxt(join(seq_folder, 'times.txt'), dtype=np.float32)) + self.times.append( + np.loadtxt(join(seq_folder, "times.txt"), dtype=np.float32) + ) # Read poses - poses_f64 = self.parse_poses(join(seq_folder, 'poses.txt'), self.calibrations[-1]) + poses_f64 = self.parse_poses( + join(seq_folder, "poses.txt"), self.calibrations[-1] + ) self.poses.append([pose.astype(np.float32) for pose in poses_f64]) ################################### # Prepare the indices of all frames ################################### - seq_inds = np.hstack([np.ones(len(_), dtype=np.int32) * i for i, _ in enumerate(self.frames)]) + seq_inds = np.hstack( + [np.ones(len(_), dtype=np.int32) * i for i, _ in enumerate(self.frames)] + ) frame_inds = np.hstack([np.arange(len(_), dtype=np.int32) for _ in self.frames]) self.all_inds = np.vstack((seq_inds, frame_inds)).T @@ -580,61 +643,69 @@ class SemanticKittiDataset(PointCloudDataset): # For each class list the frames containing them ################################################ - if self.set in ['training', 'validation']: - + if self.set in ["training", "validation"]: class_frames_bool = np.zeros((0, self.num_classes), dtype=np.bool) self.class_proportions = np.zeros((self.num_classes,), dtype=np.int32) for s_ind, (seq, seq_frames) in enumerate(zip(self.sequences, self.frames)): - - frame_mode = 'single' + frame_mode = "single" if self.config.n_frames > 1: - frame_mode = 'multi' - seq_stat_file = join(self.path, 'sequences', seq, 'stats_{:s}.pkl'.format(frame_mode)) + frame_mode = "multi" + seq_stat_file = join( + self.path, "sequences", seq, "stats_{:s}.pkl".format(frame_mode) + ) # Check if inputs have already been computed if exists(seq_stat_file): # Read pkl - with open(seq_stat_file, 'rb') as f: + with open(seq_stat_file, "rb") as f: seq_class_frames, seq_proportions = pickle.load(f) else: - # Initiate dict - print('Preparing seq {:s} class frames. (Long but one time only)'.format(seq)) + print( + "Preparing seq {:s} class frames. 
(Long but one time only)".format( + seq + ) + ) # Class frames as a boolean mask - seq_class_frames = np.zeros((len(seq_frames), self.num_classes), dtype=np.bool) + seq_class_frames = np.zeros( + (len(seq_frames), self.num_classes), dtype=np.bool + ) # Proportion of each class seq_proportions = np.zeros((self.num_classes,), dtype=np.int32) # Sequence path - seq_path = join(self.path, 'sequences', seq) + seq_path = join(self.path, "sequences", seq) # Read all frames for f_ind, frame_name in enumerate(seq_frames): - # Path of points and labels - label_file = join(seq_path, 'labels', frame_name + '.label') + label_file = join(seq_path, "labels", frame_name + ".label") # Read labels frame_labels = np.fromfile(label_file, dtype=np.int32) - sem_labels = frame_labels & 0xFFFF # semantic label in lower half + sem_labels = ( + frame_labels & 0xFFFF + ) # semantic label in lower half sem_labels = self.learning_map[sem_labels] # Get present labels and there frequency unique, counts = np.unique(sem_labels, return_counts=True) # Add this frame to the frame lists of all class present - frame_labels = np.array([self.label_to_idx[l] for l in unique], dtype=np.int32) + frame_labels = np.array( + [self.label_to_idx[l] for l in unique], dtype=np.int32 + ) seq_class_frames[f_ind, frame_labels] = True # Add proportions seq_proportions[frame_labels] += counts # Save pickle - with open(seq_stat_file, 'wb') as f: + with open(seq_stat_file, "wb") as f: pickle.dump([seq_class_frames, seq_proportions], f) class_frames_bool = np.vstack((class_frames_bool, seq_class_frames)) @@ -647,26 +718,30 @@ class SemanticKittiDataset(PointCloudDataset): self.class_frames.append(torch.zeros((0,), dtype=torch.int64)) else: integer_inds = np.where(class_frames_bool[:, i])[0] - self.class_frames.append(torch.from_numpy(integer_inds.astype(np.int64))) + self.class_frames.append( + torch.from_numpy(integer_inds.astype(np.int64)) + ) # Add variables for validation - if self.set == 'validation': + if self.set == "validation": self.val_points = [] self.val_labels = [] self.val_confs = [] for s_ind, seq_frames in enumerate(self.frames): - self.val_confs.append(np.zeros((len(seq_frames), self.num_classes, self.num_classes))) + self.val_confs.append( + np.zeros((len(seq_frames), self.num_classes, self.num_classes)) + ) return def parse_calibration(self, filename): - """ read calibration file with given filename + """read calibration file with given filename - Returns - ------- - dict - Calibration matrices as 4x4 numpy arrays. + Returns + ------- + dict + Calibration matrices as 4x4 numpy arrays. """ calib = {} @@ -688,12 +763,12 @@ class SemanticKittiDataset(PointCloudDataset): return calib def parse_poses(self, filename, calibration): - """ read poses file with per-scan poses from given filename + """read poses file with per-scan poses from given filename - Returns - ------- - list - list of poses as 4x4 numpy arrays. + Returns + ------- + list + list of poses as 4x4 numpy arrays. 
""" file = open(filename) @@ -732,7 +807,7 @@ class SemanticKittiSampler(Sampler): self.dataset = dataset # Number of step per epoch - if dataset.set == 'training': + if dataset.set == "training": self.N = dataset.config.epoch_steps else: self.N = dataset.config.validation_size @@ -746,7 +821,6 @@ class SemanticKittiSampler(Sampler): """ if self.dataset.balance_classes: - # Initiate current epoch ind self.dataset.epoch_i *= 0 self.dataset.epoch_inds *= 0 @@ -760,23 +834,30 @@ class SemanticKittiSampler(Sampler): gen_classes = [] for i, c in enumerate(self.dataset.label_values): if c not in self.dataset.ignored_labels: - # Get the potentials of the frames containing this class - class_potentials = self.dataset.potentials[self.dataset.class_frames[i]] - + class_potentials = self.dataset.potentials[ + self.dataset.class_frames[i] + ] if class_potentials.shape[0] > 0: - # Get the indices to generate thanks to potentials - used_classes = self.dataset.num_classes - len(self.dataset.ignored_labels) + used_classes = self.dataset.num_classes - len( + self.dataset.ignored_labels + ) class_n = num_centers // used_classes + 1 if class_n < class_potentials.shape[0]: - _, class_indices = torch.topk(class_potentials, class_n, largest=False) + _, class_indices = torch.topk( + class_potentials, class_n, largest=False + ) else: class_indices = torch.zeros((0,), dtype=torch.int64) while class_indices.shape[0] < class_n: - new_class_inds = torch.randperm(class_potentials.shape[0]).type(torch.int64) - class_indices = torch.cat((class_indices, new_class_inds), dim=0) + new_class_inds = torch.randperm( + class_potentials.shape[0] + ).type(torch.int64) + class_indices = torch.cat( + (class_indices, new_class_inds), dim=0 + ) class_indices = class_indices[:class_n] class_indices = self.dataset.class_frames[i][class_indices] @@ -786,16 +867,25 @@ class SemanticKittiSampler(Sampler): # Update potentials update_inds = torch.unique(class_indices) - self.dataset.potentials[update_inds] = torch.ceil(self.dataset.potentials[update_inds]) - self.dataset.potentials[update_inds] += torch.from_numpy(np.random.rand(update_inds.shape[0]) * 0.1 + 0.1) + self.dataset.potentials[update_inds] = torch.ceil( + self.dataset.potentials[update_inds] + ) + self.dataset.potentials[update_inds] += torch.from_numpy( + np.random.rand(update_inds.shape[0]) * 0.1 + 0.1 + ) else: - error_message = '\nIt seems there is a problem with the class statistics of your dataset, saved in the variable dataset.class_frames.\n' - error_message += 'Here are the current statistics:\n' - error_message += '{:>15s} {:>15s}\n'.format('Class', '# of frames') + error_message = "\nIt seems there is a problem with the class statistics of your dataset, saved in the variable dataset.class_frames.\n" + error_message += "Here are the current statistics:\n" + error_message += "{:>15s} {:>15s}\n".format( + "Class", "# of frames" + ) for iii, ccc in enumerate(self.dataset.label_values): - error_message += '{:>15s} {:>15d}\n'.format(self.dataset.label_names[iii], len(self.dataset.class_frames[iii])) - error_message += '\nThis error is raised if one of the classes is not ignored and does not appear in any of the frames of the dataset.\n' + error_message += "{:>15s} {:>15d}\n".format( + self.dataset.label_names[iii], + len(self.dataset.class_frames[iii]), + ) + error_message += "\nThis error is raised if one of the classes is not ignored and does not appear in any of the frames of the dataset.\n" raise ValueError(error_message) # Stack the chosen indices of all classes @@ 
-808,15 +898,14 @@ class SemanticKittiSampler(Sampler): gen_classes = gen_classes[rand_order] # Update potentials (Change the order for the next epoch) - #self.dataset.potentials[gen_indices] = torch.ceil(self.dataset.potentials[gen_indices]) - #self.dataset.potentials[gen_indices] += torch.from_numpy(np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1) + # self.dataset.potentials[gen_indices] = torch.ceil(self.dataset.potentials[gen_indices]) + # self.dataset.potentials[gen_indices] += torch.from_numpy(np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1) # Update epoch inds self.dataset.epoch_inds += gen_indices self.dataset.epoch_labels += gen_classes.type(torch.int32) else: - # Initiate current epoch ind self.dataset.epoch_i *= 0 self.dataset.epoch_inds *= 0 @@ -827,17 +916,25 @@ class SemanticKittiSampler(Sampler): # Get the list of indices to generate thanks to potentials if num_centers < self.dataset.potentials.shape[0]: - _, gen_indices = torch.topk(self.dataset.potentials, num_centers, largest=False, sorted=True) + _, gen_indices = torch.topk( + self.dataset.potentials, num_centers, largest=False, sorted=True + ) else: gen_indices = torch.randperm(self.dataset.potentials.shape[0]) while gen_indices.shape[0] < num_centers: - new_gen_indices = torch.randperm(self.dataset.potentials.shape[0]).type(torch.int32) + new_gen_indices = torch.randperm( + self.dataset.potentials.shape[0] + ).type(torch.int32) gen_indices = torch.cat((gen_indices, new_gen_indices), dim=0) gen_indices = gen_indices[:num_centers] # Update potentials (Change the order for the next epoch) - self.dataset.potentials[gen_indices] = torch.ceil(self.dataset.potentials[gen_indices]) - self.dataset.potentials[gen_indices] += torch.from_numpy(np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1) + self.dataset.potentials[gen_indices] = torch.ceil( + self.dataset.potentials[gen_indices] + ) + self.dataset.potentials[gen_indices] += torch.from_numpy( + np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1 + ) # Update epoch inds self.dataset.epoch_inds += gen_indices @@ -852,7 +949,9 @@ class SemanticKittiSampler(Sampler): """ return self.N - def calib_max_in(self, config, dataloader, untouched_ratio=0.8, verbose=True, force_redo=False): + def calib_max_in( + self, config, dataloader, untouched_ratio=0.8, verbose=True, force_redo=False + ): """ Method performing batch and neighbors calibration. 
Batch calibration: Set "batch_limit" (the maximum number of points allowed in every batch) so that the @@ -865,7 +964,9 @@ class SemanticKittiSampler(Sampler): # Previously saved calibration ############################## - print('\nStarting Calibration of max_in_points value (use verbose=True for more details)') + print( + "\nStarting Calibration of max_in_points value (use verbose=True for more details)" + ) t0 = time.time() redo = force_redo @@ -874,39 +975,38 @@ class SemanticKittiSampler(Sampler): # *********** # Load max_in_limit dictionary - max_in_lim_file = join(self.dataset.path, 'max_in_limits.pkl') + max_in_lim_file = join(self.dataset.path, "max_in_limits.pkl") if exists(max_in_lim_file): - with open(max_in_lim_file, 'rb') as file: + with open(max_in_lim_file, "rb") as file: max_in_lim_dict = pickle.load(file) else: max_in_lim_dict = {} # Check if the max_in limit associated with current parameters exists if self.dataset.balance_classes: - sampler_method = 'balanced' + sampler_method = "balanced" else: - sampler_method = 'random' - key = '{:s}_{:.3f}_{:.3f}'.format(sampler_method, - self.dataset.in_R, - self.dataset.config.first_subsampling_dl) + sampler_method = "random" + key = "{:s}_{:.3f}_{:.3f}".format( + sampler_method, self.dataset.in_R, self.dataset.config.first_subsampling_dl + ) if not redo and key in max_in_lim_dict: self.dataset.max_in_p = max_in_lim_dict[key] else: redo = True if verbose: - print('\nPrevious calibration found:') - print('Check max_in limit dictionary') + print("\nPrevious calibration found:") + print("Check max_in limit dictionary") if key in max_in_lim_dict: color = bcolors.OKGREEN v = str(int(max_in_lim_dict[key])) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" 
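# --- Illustrative sketch, not part of the patch ----------------------------------
# When redo is True, calib_max_in collects the first-layer stack lengths
# (points per input sphere) over a few epochs and then sets max_in_p to the
# untouched_ratio percentile, so only the largest ~(1 - untouched_ratio) fraction
# of inputs ever gets cropped. A stand-alone sketch of that rule, where
# all_lengths is a plain Python list of per-sphere point counts:
import numpy as np

def max_in_from_lengths(all_lengths, untouched_ratio=0.8):
    # e.g. untouched_ratio=0.8 keeps roughly 80% of the spheres uncropped
    return int(np.percentile(all_lengths, 100 * untouched_ratio))
# ----------------------------------------------------------------------------------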
+ print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) if redo: - ######################## # Batch calib parameters ######################## @@ -925,7 +1025,6 @@ class SemanticKittiSampler(Sampler): for epoch in range(10): for batch_i, batch in enumerate(dataloader): - # Control max_in_points value all_lengths += batch.lengths[0].tolist() @@ -940,36 +1039,38 @@ class SemanticKittiSampler(Sampler): # Console display (only one per second) if t - last_display > 1.0: last_display = t - message = 'Collecting {:d} in_points: {:5.1f}%' - print(message.format(N, - 100 * len(all_lengths) / N)) + message = "Collecting {:d} in_points: {:5.1f}%" + print(message.format(N, 100 * len(all_lengths) / N)) if breaking: break - self.dataset.max_in_p = int(np.percentile(all_lengths, 100*untouched_ratio)) + self.dataset.max_in_p = int( + np.percentile(all_lengths, 100 * untouched_ratio) + ) if verbose: - # Create histogram a = 1 # Save max_in_limit dictionary - print('New max_in_p = ', self.dataset.max_in_p) + print("New max_in_p = ", self.dataset.max_in_p) max_in_lim_dict[key] = self.dataset.max_in_p - with open(max_in_lim_file, 'wb') as file: + with open(max_in_lim_file, "wb") as file: pickle.dump(max_in_lim_dict, file) # Update value in config - if self.dataset.set == 'training': + if self.dataset.set == "training": config.max_in_points = self.dataset.max_in_p else: config.max_val_points = self.dataset.max_in_p - print('Calibration done in {:.1f}s\n'.format(time.time() - t0)) + print("Calibration done in {:.1f}s\n".format(time.time() - t0)) return - def calibration(self, dataloader, untouched_ratio=0.9, verbose=False, force_redo=False): + def calibration( + self, dataloader, untouched_ratio=0.9, verbose=False, force_redo=False + ): """ Method performing batch and neighbors calibration. 
Batch calibration: Set "batch_limit" (the maximum number of points allowed in every batch) so that the @@ -982,7 +1083,7 @@ class SemanticKittiSampler(Sampler): # Previously saved calibration ############################## - print('\nStarting Calibration (use verbose=True for more details)') + print("\nStarting Calibration (use verbose=True for more details)") t0 = time.time() redo = force_redo @@ -991,46 +1092,48 @@ class SemanticKittiSampler(Sampler): # *********** # Load batch_limit dictionary - batch_lim_file = join(self.dataset.path, 'batch_limits.pkl') + batch_lim_file = join(self.dataset.path, "batch_limits.pkl") if exists(batch_lim_file): - with open(batch_lim_file, 'rb') as file: + with open(batch_lim_file, "rb") as file: batch_lim_dict = pickle.load(file) else: batch_lim_dict = {} # Check if the batch limit associated with current parameters exists if self.dataset.balance_classes: - sampler_method = 'balanced' + sampler_method = "balanced" else: - sampler_method = 'random' - key = '{:s}_{:.3f}_{:.3f}_{:d}_{:d}'.format(sampler_method, - self.dataset.in_R, - self.dataset.config.first_subsampling_dl, - self.dataset.batch_num, - self.dataset.max_in_p) + sampler_method = "random" + key = "{:s}_{:.3f}_{:.3f}_{:d}_{:d}".format( + sampler_method, + self.dataset.in_R, + self.dataset.config.first_subsampling_dl, + self.dataset.batch_num, + self.dataset.max_in_p, + ) if not redo and key in batch_lim_dict: self.dataset.batch_limit[0] = batch_lim_dict[key] else: redo = True if verbose: - print('\nPrevious calibration found:') - print('Check batch limit dictionary') + print("\nPrevious calibration found:") + print("Check batch limit dictionary") if key in batch_lim_dict: color = bcolors.OKGREEN v = str(int(batch_lim_dict[key])) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" 
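# --- Illustrative sketch, not part of the patch ----------------------------------
# In the `if redo:` branch further below, the width of the neighborhood-size
# histogram is bounded by
#     hist_n = ceil(4/3 * pi * (deform_radius + 1)**3),
# i.e. the volume of a ball of radius deform_radius measured in grid-cell units
# (+1 as margin): after grid subsampling there is at most about one point per
# cell, so a neighborhood cannot contain more points than cells fit in that ball.
import numpy as np

def neighbor_count_upper_bound(deform_radius):
    return int(np.ceil(4 / 3 * np.pi * (deform_radius + 1) ** 3))

# e.g. with deform_radius = 6.0 (value used only as an example) the bound is 1437
# ----------------------------------------------------------------------------------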
+ print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) # Neighbors limit # *************** # Load neighb_limits dictionary - neighb_lim_file = join(self.dataset.path, 'neighbors_limits.pkl') + neighb_lim_file = join(self.dataset.path, "neighbors_limits.pkl") if exists(neighb_lim_file): - with open(neighb_lim_file, 'rb') as file: + with open(neighb_lim_file, "rb") as file: neighb_lim_dict = pickle.load(file) else: neighb_lim_dict = {} @@ -1038,14 +1141,15 @@ class SemanticKittiSampler(Sampler): # Check if the limit associated with current parameters exists (for each layer) neighb_limits = [] for layer_ind in range(self.dataset.config.num_layers): - dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:s}_{:d}_{:.3f}_{:.3f}'.format(sampler_method, self.dataset.max_in_p, dl, r) + key = "{:s}_{:d}_{:.3f}_{:.3f}".format( + sampler_method, self.dataset.max_in_p, dl, r + ) if key in neighb_lim_dict: neighb_limits += [neighb_lim_dict[key]] @@ -1055,34 +1159,39 @@ class SemanticKittiSampler(Sampler): redo = True if verbose: - print('Check neighbors limit dictionary') + print("Check neighbors limit dictionary") for layer_ind in range(self.dataset.config.num_layers): dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:s}_{:d}_{:.3f}_{:.3f}'.format(sampler_method, self.dataset.max_in_p, dl, r) + key = "{:s}_{:d}_{:.3f}_{:.3f}".format( + sampler_method, self.dataset.max_in_p, dl, r + ) if key in neighb_lim_dict: color = bcolors.OKGREEN v = str(neighb_lim_dict[key]) else: color = bcolors.FAIL - v = '?' - print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC)) + v = "?" 
+ print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC)) if redo: - ############################ # Neighbors calib parameters ############################ # From config parameter, compute higher bound of neighbors number in a neighborhood - hist_n = int(np.ceil(4 / 3 * np.pi * (self.dataset.config.deform_radius + 1) ** 3)) + hist_n = int( + np.ceil(4 / 3 * np.pi * (self.dataset.config.deform_radius + 1) ** 3) + ) # Histogram of neighborhood sizes - neighb_hists = np.zeros((self.dataset.config.num_layers, hist_n), dtype=np.int32) + neighb_hists = np.zeros( + (self.dataset.config.num_layers, hist_n), dtype=np.int32 + ) ######################## # Batch calib parameters @@ -1114,18 +1223,20 @@ class SemanticKittiSampler(Sampler): # Perform calibration ##################### - #self.dataset.batch_limit[0] = self.dataset.max_in_p * (self.dataset.batch_num - 1) + # self.dataset.batch_limit[0] = self.dataset.max_in_p * (self.dataset.batch_num - 1) for epoch in range(10): for batch_i, batch in enumerate(dataloader): - # Control max_in_points value are_cropped = batch.lengths[0] > self.dataset.max_in_p - 1 cropped_n += torch.sum(are_cropped.type(torch.int32)).item() all_n += int(batch.lengths[0].shape[0]) # Update neighborhood histogram - counts = [np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) for neighb_mat in batch.neighbors] + counts = [ + np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) + for neighb_mat in batch.neighbors + ] hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts] neighb_hists += np.vstack(hists) @@ -1162,85 +1273,97 @@ class SemanticKittiSampler(Sampler): # Console display (only one per second) if verbose and (t - last_display) > 1.0: last_display = t - message = 'Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}' - print(message.format(i, - estim_b, - int(self.dataset.batch_limit[0]))) + message = "Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}" + print( + message.format(i, estim_b, int(self.dataset.batch_limit[0])) + ) if breaking: break # Use collected neighbor histogram to get neighbors limit cumsum = np.cumsum(neighb_hists.T, axis=0) - percentiles = np.sum(cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0) + percentiles = np.sum( + cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0 + ) self.dataset.neighborhood_limits = percentiles if verbose: - # Crop histogram while np.sum(neighb_hists[:, -1]) == 0: neighb_hists = neighb_hists[:, :-1] hist_n = neighb_hists.shape[1] - print('\n**************************************************\n') - line0 = 'neighbors_num ' + print("\n**************************************************\n") + line0 = "neighbors_num " for layer in range(neighb_hists.shape[0]): - line0 += '| layer {:2d} '.format(layer) + line0 += "| layer {:2d} ".format(layer) print(line0) for neighb_size in range(hist_n): - line0 = ' {:4d} '.format(neighb_size) + line0 = " {:4d} ".format(neighb_size) for layer in range(neighb_hists.shape[0]): if neighb_size > percentiles[layer]: color = bcolors.FAIL else: color = bcolors.OKGREEN - line0 += '|{:}{:10d}{:} '.format(color, - neighb_hists[layer, neighb_size], - bcolors.ENDC) + line0 += "|{:}{:10d}{:} ".format( + color, neighb_hists[layer, neighb_size], bcolors.ENDC + ) print(line0) - print('\n**************************************************\n') - print('\nchosen neighbors limits: ', percentiles) + print("\n**************************************************\n") + print("\nchosen neighbors limits: ", percentiles) print() # Control max_in_points value - 
print('\n**************************************************\n') + print("\n**************************************************\n") if cropped_n > 0.3 * all_n: color = bcolors.FAIL else: color = bcolors.OKGREEN - print('Current value of max_in_points {:d}'.format(self.dataset.max_in_p)) - print(' > {:}{:.1f}% inputs are cropped{:}'.format(color, 100 * cropped_n / all_n, bcolors.ENDC)) + print("Current value of max_in_points {:d}".format(self.dataset.max_in_p)) + print( + " > {:}{:.1f}% inputs are cropped{:}".format( + color, 100 * cropped_n / all_n, bcolors.ENDC + ) + ) if cropped_n > 0.3 * all_n: - print('\nTry a higher max_in_points value\n'.format(100 * cropped_n / all_n)) - #raise ValueError('Value of max_in_points too low') - print('\n**************************************************\n') + print( + "\nTry a higher max_in_points value\n".format( + 100 * cropped_n / all_n + ) + ) + # raise ValueError('Value of max_in_points too low') + print("\n**************************************************\n") # Save batch_limit dictionary - key = '{:s}_{:.3f}_{:.3f}_{:d}_{:d}'.format(sampler_method, - self.dataset.in_R, - self.dataset.config.first_subsampling_dl, - self.dataset.batch_num, - self.dataset.max_in_p) + key = "{:s}_{:.3f}_{:.3f}_{:d}_{:d}".format( + sampler_method, + self.dataset.in_R, + self.dataset.config.first_subsampling_dl, + self.dataset.batch_num, + self.dataset.max_in_p, + ) batch_lim_dict[key] = float(self.dataset.batch_limit[0]) - with open(batch_lim_file, 'wb') as file: + with open(batch_lim_file, "wb") as file: pickle.dump(batch_lim_dict, file) # Save neighb_limit dictionary for layer_ind in range(self.dataset.config.num_layers): - dl = self.dataset.config.first_subsampling_dl * (2 ** layer_ind) + dl = self.dataset.config.first_subsampling_dl * (2**layer_ind) if self.dataset.config.deform_layers[layer_ind]: r = dl * self.dataset.config.deform_radius else: r = dl * self.dataset.config.conv_radius - key = '{:s}_{:d}_{:.3f}_{:.3f}'.format(sampler_method, self.dataset.max_in_p, dl, r) + key = "{:s}_{:d}_{:.3f}_{:.3f}".format( + sampler_method, self.dataset.max_in_p, dl, r + ) neighb_lim_dict[key] = self.dataset.neighborhood_limits[layer_ind] - with open(neighb_lim_file, 'wb') as file: + with open(neighb_lim_file, "wb") as file: pickle.dump(neighb_lim_dict, file) - - print('Calibration done in {:.1f}s\n'.format(time.time() - t0)) + print("Calibration done in {:.1f}s\n".format(time.time() - t0)) return @@ -1248,7 +1371,6 @@ class SemanticKittiCustomBatch: """Custom batch definition with memory pinning for SemanticKitti""" def __init__(self, input_list): - # Get rid of batch dimension input_list = input_list[0] @@ -1257,15 +1379,25 @@ class SemanticKittiCustomBatch: # Extract input tensors from the list of numpy array ind = 1 - self.points = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.points = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.neighbors = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.neighbors = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.pools = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.pools = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.upsamples = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.upsamples = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L - self.lengths = 
[torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]] + self.lengths = [ + torch.from_numpy(nparray) for nparray in input_list[ind : ind + L] + ] ind += L self.features = torch.from_numpy(input_list[ind]) ind += 1 @@ -1307,7 +1439,6 @@ class SemanticKittiCustomBatch: return self def to(self, device): - self.points = [in_tensor.to(device) for in_tensor in self.points] self.neighbors = [in_tensor.to(device) for in_tensor in self.neighbors] self.pools = [in_tensor.to(device) for in_tensor in self.pools] @@ -1324,15 +1455,15 @@ class SemanticKittiCustomBatch: def unstack_points(self, layer=None): """Unstack the points""" - return self.unstack_elements('points', layer) + return self.unstack_elements("points", layer) def unstack_neighbors(self, layer=None): """Unstack the neighbors indices""" - return self.unstack_elements('neighbors', layer) + return self.unstack_elements("neighbors", layer) def unstack_pools(self, layer=None): """Unstack the pooling indices""" - return self.unstack_elements('pools', layer) + return self.unstack_elements("pools", layer) def unstack_elements(self, element_name, layer=None, to_numpy=True): """ @@ -1340,34 +1471,31 @@ class SemanticKittiCustomBatch: layers """ - if element_name == 'points': + if element_name == "points": elements = self.points - elif element_name == 'neighbors': + elif element_name == "neighbors": elements = self.neighbors - elif element_name == 'pools': + elif element_name == "pools": elements = self.pools[:-1] else: - raise ValueError('Unknown element name: {:s}'.format(element_name)) + raise ValueError("Unknown element name: {:s}".format(element_name)) all_p_list = [] for layer_i, layer_elems in enumerate(elements): - if layer is None or layer == layer_i: - i0 = 0 p_list = [] - if element_name == 'pools': - lengths = self.lengths[layer_i+1] + if element_name == "pools": + lengths = self.lengths[layer_i + 1] else: lengths = self.lengths[layer_i] for b_i, length in enumerate(lengths): - - elem = layer_elems[i0:i0 + length] - if element_name == 'neighbors': + elem = layer_elems[i0 : i0 + length] + if element_name == "neighbors": elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= 0] -= i0 - elif element_name == 'pools': + elif element_name == "pools": elem[elem >= self.points[layer_i].shape[0]] = -1 elem[elem >= 0] -= torch.sum(self.lengths[layer_i][:b_i]) i0 += length @@ -1405,7 +1533,6 @@ def debug_timing(dataset, loader): estim_N = 0 for epoch in range(10): - for batch_i, batch in enumerate(loader): # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes) @@ -1427,14 +1554,14 @@ def debug_timing(dataset, loader): # Console display (only one per second) if (t[-1] - last_display) > -1.0: last_display = t[-1] - message = 'Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f} - {:.0f}' - print(message.format(batch_i, - 1000 * mean_dt[0], - 1000 * mean_dt[1], - estim_b, - estim_N)) + message = "Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f} - {:.0f}" + print( + message.format( + batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1], estim_b, estim_N + ) + ) - print('************* Epoch ended *************') + print("************* Epoch ended *************") _, counts = np.unique(dataset.input_labels, return_counts=True) print(counts) @@ -1447,11 +1574,11 @@ def debug_class_w(dataset, loader): counts = np.zeros((dataset.num_classes,), dtype=np.int64) - s = '{:^6}|'.format('step') + s = "{:^6}|".format("step") for c in dataset.label_names: - s += '{:^6}'.format(c[:4]) + s += 
"{:^6}".format(c[:4]) print(s) - print(6*'-' + '|' + 6*dataset.num_classes*'-') + print(6 * "-" + "|" + 6 * dataset.num_classes * "-") for epoch in range(10): for batch_i, batch in enumerate(loader): @@ -1460,14 +1587,13 @@ def debug_class_w(dataset, loader): # count labels new_counts = np.bincount(batch.labels) - counts[:new_counts.shape[0]] += new_counts.astype(np.int64) + counts[: new_counts.shape[0]] += new_counts.astype(np.int64) # Update proportions proportions = 1000 * counts / np.sum(counts) - s = '{:^6d}|'.format(i) + s = "{:^6d}|".format(i) for pp in proportions: - s += '{:^6.1f}'.format(pp) + s += "{:^6.1f}".format(pp) print(s) i += 1 - diff --git a/datasetss/common.py b/datasetss/common.py index e476939..242de46 100644 --- a/datasetss/common.py +++ b/datasetss/common.py @@ -21,12 +21,8 @@ # # Common libs -import time -import os import numpy as np -import sys -import torch -from torch.utils.data import DataLoader, Dataset +from torch.utils.data import Dataset from utils.config import Config from utils.mayavi_visu import * from kernels.kernel_points import create_3D_rotations @@ -41,6 +37,7 @@ import cpp_wrappers.cpp_neighbors.radius_neighbors as cpp_neighbors # \***********************/ # + def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0): """ CPP wrapper for a grid subsampling (method = barycenter for points and features) @@ -53,29 +50,35 @@ def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0 """ if (features is None) and (labels is None): - return cpp_subsampling.subsample(points, - sampleDl=sampleDl, - verbose=verbose) - elif (labels is None): - return cpp_subsampling.subsample(points, - features=features, - sampleDl=sampleDl, - verbose=verbose) - elif (features is None): - return cpp_subsampling.subsample(points, - classes=labels, - sampleDl=sampleDl, - verbose=verbose) + return cpp_subsampling.subsample(points, sampleDl=sampleDl, verbose=verbose) + elif labels is None: + return cpp_subsampling.subsample( + points, features=features, sampleDl=sampleDl, verbose=verbose + ) + elif features is None: + return cpp_subsampling.subsample( + points, classes=labels, sampleDl=sampleDl, verbose=verbose + ) else: - return cpp_subsampling.subsample(points, - features=features, - classes=labels, - sampleDl=sampleDl, - verbose=verbose) + return cpp_subsampling.subsample( + points, + features=features, + classes=labels, + sampleDl=sampleDl, + verbose=verbose, + ) -def batch_grid_subsampling(points, batches_len, features=None, labels=None, - sampleDl=0.1, max_p=0, verbose=0, random_grid_orient=True): +def batch_grid_subsampling( + points, + batches_len, + features=None, + labels=None, + sampleDl=0.1, + max_p=0, + verbose=0, + random_grid_orient=True, +): """ CPP wrapper for a grid subsampling (method = barycenter for points and features) :param points: (N, 3) matrix of input points @@ -89,7 +92,6 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None, R = None B = len(batches_len) if random_grid_orient: - ######################################################## # Create a random rotation matrix for each batch element ######################################################## @@ -99,7 +101,9 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None, phi = (np.random.rand(B) - 0.5) * np.pi # Create the first vector in carthesian coordinates - u = np.vstack([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]) + u = np.vstack( + [np.cos(theta) * np.cos(phi), np.sin(theta) * 
np.cos(phi), np.sin(phi)] + ) # Choose a random rotation angle alpha = np.random.rand(B) * 2 * np.pi @@ -115,7 +119,9 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None, points = points.copy() for bi, length in enumerate(batches_len): # Apply the rotation - points[i0:i0 + length, :] = np.sum(np.expand_dims(points[i0:i0 + length, :], 2) * R[bi], axis=1) + points[i0 : i0 + length, :] = np.sum( + np.expand_dims(points[i0 : i0 + length, :], 2) * R[bi], axis=1 + ) i0 += length ####################### @@ -123,61 +129,73 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None, ####################### if (features is None) and (labels is None): - s_points, s_len = cpp_subsampling.subsample_batch(points, - batches_len, - sampleDl=sampleDl, - max_p=max_p, - verbose=verbose) + s_points, s_len = cpp_subsampling.subsample_batch( + points, batches_len, sampleDl=sampleDl, max_p=max_p, verbose=verbose + ) if random_grid_orient: i0 = 0 for bi, length in enumerate(s_len): - s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1) + s_points[i0 : i0 + length, :] = np.sum( + np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1 + ) i0 += length return s_points, s_len - elif (labels is None): - s_points, s_len, s_features = cpp_subsampling.subsample_batch(points, - batches_len, - features=features, - sampleDl=sampleDl, - max_p=max_p, - verbose=verbose) + elif labels is None: + s_points, s_len, s_features = cpp_subsampling.subsample_batch( + points, + batches_len, + features=features, + sampleDl=sampleDl, + max_p=max_p, + verbose=verbose, + ) if random_grid_orient: i0 = 0 for bi, length in enumerate(s_len): # Apply the rotation - s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1) + s_points[i0 : i0 + length, :] = np.sum( + np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1 + ) i0 += length return s_points, s_len, s_features - elif (features is None): - s_points, s_len, s_labels = cpp_subsampling.subsample_batch(points, - batches_len, - classes=labels, - sampleDl=sampleDl, - max_p=max_p, - verbose=verbose) + elif features is None: + s_points, s_len, s_labels = cpp_subsampling.subsample_batch( + points, + batches_len, + classes=labels, + sampleDl=sampleDl, + max_p=max_p, + verbose=verbose, + ) if random_grid_orient: i0 = 0 for bi, length in enumerate(s_len): # Apply the rotation - s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1) + s_points[i0 : i0 + length, :] = np.sum( + np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1 + ) i0 += length return s_points, s_len, s_labels else: - s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(points, - batches_len, - features=features, - classes=labels, - sampleDl=sampleDl, - max_p=max_p, - verbose=verbose) + s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch( + points, + batches_len, + features=features, + classes=labels, + sampleDl=sampleDl, + max_p=max_p, + verbose=verbose, + ) if random_grid_orient: i0 = 0 for bi, length in enumerate(s_len): # Apply the rotation - s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1) + s_points[i0 : i0 + length, :] = np.sum( + np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1 + ) i0 += length return s_points, s_len, s_features, s_labels @@ -193,7 +211,9 @@ def batch_neighbors(queries, 
supports, q_batches, s_batches, radius): :return: neighbors indices """ - return cpp_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=radius) + return cpp_neighbors.batch_query( + queries, supports, q_batches, s_batches, radius=radius + ) # ---------------------------------------------------------------------------------------------------------------------- @@ -211,7 +231,7 @@ class PointCloudDataset(Dataset): """ self.name = name - self.path = '' + self.path = "" self.label_to_names = {} self.num_classes = 0 self.label_values = np.zeros((0,), dtype=np.int32) @@ -237,7 +257,6 @@ class PointCloudDataset(Dataset): return 0 def init_labels(self): - # Initialize all label parameters given the label_to_names dict self.num_classes = len(self.label_to_names) self.label_values = np.sort([k for k, v in self.label_to_names.items()]) @@ -256,27 +275,33 @@ class PointCloudDataset(Dataset): R = np.eye(points.shape[1]) if points.shape[1] == 3: - if self.config.augment_rotation == 'vertical': - + if self.config.augment_rotation == "vertical": # Create random rotations theta = np.random.rand() * 2 * np.pi c, s = np.cos(theta), np.sin(theta) R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32) - elif self.config.augment_rotation == 'all': - + elif self.config.augment_rotation == "all": # Choose two random angles for the first vector in polar coordinates theta = np.random.rand() * 2 * np.pi phi = (np.random.rand() - 0.5) * np.pi # Create the first vector in carthesian coordinates - u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]) + u = np.array( + [ + np.cos(theta) * np.cos(phi), + np.sin(theta) * np.cos(phi), + np.sin(phi), + ] + ) # Choose a random rotation angle alpha = np.random.rand() * 2 * np.pi # Create the rotation matrix with this vector and angle - R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0] + R = create_3D_rotations( + np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)) + )[0] R = R.astype(np.float32) @@ -301,17 +326,19 @@ class PointCloudDataset(Dataset): # Noise ####### - noise = (np.random.randn(points.shape[0], points.shape[1]) * self.config.augment_noise).astype(np.float32) + noise = ( + np.random.randn(points.shape[0], points.shape[1]) + * self.config.augment_noise + ).astype(np.float32) ################## # Apply transforms ################## # Do not use np.dot because it is multi-threaded - #augmented_points = np.dot(points, R) * scale + noise + # augmented_points = np.dot(points, R) * scale + noise augmented_points = np.sum(np.expand_dims(points, 2) * R, axis=1) * scale + noise - if normals is None: return augmented_points, scale, R else: @@ -319,12 +346,14 @@ class PointCloudDataset(Dataset): normal_scale = scale[[1, 2, 0]] * scale[[2, 0, 1]] augmented_normals = np.dot(normals, R) * normal_scale # Renormalise - augmented_normals *= 1 / (np.linalg.norm(augmented_normals, axis=1, keepdims=True) + 1e-6) + augmented_normals *= 1 / ( + np.linalg.norm(augmented_normals, axis=1, keepdims=True) + 1e-6 + ) if verbose: test_p = [np.vstack([points, augmented_points])] test_n = [np.vstack([normals, augmented_normals])] - test_l = [np.hstack([points[:, 2]*0, augmented_points[:, 2]*0+1])] + test_l = [np.hstack([points[:, 2] * 0, augmented_points[:, 2] * 0 + 1])] show_ModelNet_examples(test_p, test_n, test_l) return augmented_points, augmented_normals, scale, R @@ -337,16 +366,13 @@ class PointCloudDataset(Dataset): # crop neighbors matrix if len(self.neighborhood_limits) > 0: - return 
neighbors[:, :self.neighborhood_limits[layer]] + return neighbors[:, : self.neighborhood_limits[layer]] else: return neighbors - def classification_inputs(self, - stacked_points, - stacked_features, - labels, - stack_lengths): - + def classification_inputs( + self, stacked_points, stacked_features, labels, stack_lengths + ): # Starting radius of convolutions r_normal = self.config.first_subsampling_dl * self.config.conv_radius @@ -367,9 +393,13 @@ class PointCloudDataset(Dataset): arch = self.config.architecture for block_i, block in enumerate(arch): - # Get all blocks of the layer - if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block): + if not ( + "pool" in block + or "strided" in block + or "global" in block + or "upsample" in block + ): layer_blocks += [block] continue @@ -379,12 +409,14 @@ class PointCloudDataset(Dataset): deform_layer = False if layer_blocks: # Convolutions are done in this layer, compute the neighbors with the good radius - if np.any(['deformable' in blck for blck in layer_blocks]): + if np.any(["deformable" in blck for blck in layer_blocks]): r = r_normal * self.config.deform_radius / self.config.conv_radius deform_layer = True else: r = r_normal - conv_i = batch_neighbors(stacked_points, stacked_points, stack_lengths, stack_lengths, r) + conv_i = batch_neighbors( + stacked_points, stacked_points, stack_lengths, stack_lengths, r + ) else: # This layer only perform pooling, no neighbors required @@ -394,23 +426,26 @@ class PointCloudDataset(Dataset): # ************************* # If end of layer is a pooling operation - if 'pool' in block or 'strided' in block: - + if "pool" in block or "strided" in block: # New subsampling length dl = 2 * r_normal / self.config.conv_radius # Subsampled points - pool_p, pool_b = batch_grid_subsampling(stacked_points, stack_lengths, sampleDl=dl) + pool_p, pool_b = batch_grid_subsampling( + stacked_points, stack_lengths, sampleDl=dl + ) # Radius of pooled neighbors - if 'deformable' in block: + if "deformable" in block: r = r_normal * self.config.deform_radius / self.config.conv_radius deform_layer = True else: r = r_normal # Subsample indices - pool_i = batch_neighbors(pool_p, stacked_points, pool_b, stack_lengths, r) + pool_i = batch_neighbors( + pool_p, stacked_points, pool_b, stack_lengths, r + ) else: # No pooling in the end of this layer, no pooling indices required @@ -438,7 +473,7 @@ class PointCloudDataset(Dataset): layer_blocks = [] # Stop when meeting a global pooling or upsampling - if 'global' in block or 'upsample' in block: + if "global" in block or "upsample" in block: break ############### @@ -453,13 +488,9 @@ class PointCloudDataset(Dataset): return li - - def segmentation_inputs(self, - stacked_points, - stacked_features, - labels, - stack_lengths): - + def segmentation_inputs( + self, stacked_points, stacked_features, labels, stack_lengths + ): # Starting radius of convolutions r_normal = self.config.first_subsampling_dl * self.config.conv_radius @@ -481,9 +512,13 @@ class PointCloudDataset(Dataset): arch = self.config.architecture for block_i, block in enumerate(arch): - # Get all blocks of the layer - if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block): + if not ( + "pool" in block + or "strided" in block + or "global" in block + or "upsample" in block + ): layer_blocks += [block] continue @@ -493,12 +528,14 @@ class PointCloudDataset(Dataset): deform_layer = False if layer_blocks: # Convolutions are done in this layer, compute the 
neighbors with the good radius - if np.any(['deformable' in blck for blck in layer_blocks]): + if np.any(["deformable" in blck for blck in layer_blocks]): r = r_normal * self.config.deform_radius / self.config.conv_radius deform_layer = True else: r = r_normal - conv_i = batch_neighbors(stacked_points, stacked_points, stack_lengths, stack_lengths, r) + conv_i = batch_neighbors( + stacked_points, stacked_points, stack_lengths, stack_lengths, r + ) else: # This layer only perform pooling, no neighbors required @@ -508,26 +545,31 @@ class PointCloudDataset(Dataset): # ************************* # If end of layer is a pooling operation - if 'pool' in block or 'strided' in block: - + if "pool" in block or "strided" in block: # New subsampling length dl = 2 * r_normal / self.config.conv_radius # Subsampled points - pool_p, pool_b = batch_grid_subsampling(stacked_points, stack_lengths, sampleDl=dl) + pool_p, pool_b = batch_grid_subsampling( + stacked_points, stack_lengths, sampleDl=dl + ) # Radius of pooled neighbors - if 'deformable' in block: + if "deformable" in block: r = r_normal * self.config.deform_radius / self.config.conv_radius deform_layer = True else: r = r_normal # Subsample indices - pool_i = batch_neighbors(pool_p, stacked_points, pool_b, stack_lengths, r) + pool_i = batch_neighbors( + pool_p, stacked_points, pool_b, stack_lengths, r + ) # Upsample indices (with the radius of the next layer to keep wanted density) - up_i = batch_neighbors(stacked_points, pool_p, stack_lengths, pool_b, 2 * r) + up_i = batch_neighbors( + stacked_points, pool_p, stack_lengths, pool_b, 2 * r + ) else: # No pooling in the end of this layer, no pooling indices required @@ -540,7 +582,7 @@ class PointCloudDataset(Dataset): conv_i = self.big_neighborhood_filter(conv_i, len(input_points)) pool_i = self.big_neighborhood_filter(pool_i, len(input_points)) if up_i.shape[0] > 0: - up_i = self.big_neighborhood_filter(up_i, len(input_points)+1) + up_i = self.big_neighborhood_filter(up_i, len(input_points) + 1) # Updating input lists input_points += [stacked_points] @@ -559,7 +601,7 @@ class PointCloudDataset(Dataset): layer_blocks = [] # Stop when meeting a global pooling or upsampling - if 'global' in block or 'upsample' in block: + if "global" in block or "upsample" in block: break ############### @@ -567,20 +609,13 @@ class PointCloudDataset(Dataset): ############### # list of network inputs - li = input_points + input_neighbors + input_pools + input_upsamples + input_stack_lengths + li = ( + input_points + + input_neighbors + + input_pools + + input_upsamples + + input_stack_lengths + ) li += [stacked_features, labels] return li - - - - - - - - - - - - - diff --git a/kernels/kernel_points.py b/kernels/kernel_points.py index f109244..3ce2daa 100644 --- a/kernels/kernel_points.py +++ b/kernels/kernel_points.py @@ -23,10 +23,8 @@ # Import numpy package and name it "np" -import time import numpy as np import matplotlib.pyplot as plt -from matplotlib import cm from os import makedirs from os.path import join, exists @@ -41,6 +39,7 @@ from utils.config import bcolors # # + def create_3D_rotations(axis, angle): """ Create rotation matrices from a list of axes and angles. 
Code from wikipedia on quaternions @@ -62,21 +61,35 @@ def create_3D_rotations(axis, angle): t19 = t2 * axis[:, 1] * axis[:, 2] t20 = t8 * axis[:, 0] t24 = axis[:, 2] * axis[:, 2] - R = np.stack([t1 + t2 * t3, - t7 - t9, - t11 + t12, - t7 + t9, - t1 + t2 * t15, - t19 - t20, - t11 - t12, - t19 + t20, - t1 + t2 * t24], axis=1) + R = np.stack( + [ + t1 + t2 * t3, + t7 - t9, + t11 + t12, + t7 + t9, + t1 + t2 * t15, + t19 - t20, + t11 - t12, + t19 + t20, + t1 + t2 * t24, + ], + axis=1, + ) return np.reshape(R, (-1, 3, 3)) -def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximation='monte-carlo', - approx_n=5000, max_iter=500, momentum=0.9, verbose=0): +def spherical_Lloyd( + radius, + num_cells, + dimension=3, + fixed="center", + approximation="monte-carlo", + approx_n=5000, + max_iter=500, + momentum=0.9, + verbose=0, +): """ Creation of kernel point via Lloyd algorithm. We use an approximation of the algorithm, and compute the Voronoi cell centers with discretization of space. The exact formula is not trivial with part of the sphere as sides. @@ -109,13 +122,15 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio new_points = np.random.rand(num_cells, dimension) * 2 * radius0 - radius0 kernel_points = np.vstack((kernel_points, new_points)) d2 = np.sum(np.power(kernel_points, 2), axis=1) - kernel_points = kernel_points[np.logical_and(d2 < radius0 ** 2, (0.9 * radius0) ** 2 < d2), :] + kernel_points = kernel_points[ + np.logical_and(d2 < radius0**2, (0.9 * radius0) ** 2 < d2), : + ] kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1)) # Optional fixing - if fixed == 'center': + if fixed == "center": kernel_points[0, :] *= 0 - if fixed == 'verticals': + if fixed == "verticals": kernel_points[:3, :] *= 0 kernel_points[1, -1] += 2 * radius0 / 3 kernel_points[2, -1] -= 2 * radius0 / 3 @@ -129,10 +144,10 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio fig = plt.figure() # Initialize discretization in this method is chosen - if approximation == 'discretization': - side_n = int(np.floor(approx_n ** (1. 
/ dimension))) + if approximation == "discretization": + side_n = int(np.floor(approx_n ** (1.0 / dimension))) dl = 2 * radius0 / side_n - coords = np.arange(-radius0 + dl/2, radius0, dl) + coords = np.arange(-radius0 + dl / 2, radius0, dl) if dimension == 2: x, y = np.meshgrid(coords, coords) X = np.vstack((np.ravel(x), np.ravel(y))).T @@ -143,11 +158,13 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio x, y, z, t = np.meshgrid(coords, coords, coords, coords) X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T else: - raise ValueError('Unsupported dimension (max is 4)') - elif approximation == 'monte-carlo': + raise ValueError("Unsupported dimension (max is 4)") + elif approximation == "monte-carlo": X = np.zeros((0, dimension)) else: - raise ValueError('Wrong approximation method chosen: "{:s}"'.format(approximation)) + raise ValueError( + 'Wrong approximation method chosen: "{:s}"'.format(approximation) + ) # Only points inside the sphere are used d2 = np.sum(np.power(X, 2), axis=1) @@ -164,9 +181,8 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio max_moves = np.zeros((0,)) for iter in range(max_iter): - # In the case of monte-carlo, renew the sampled points - if approximation == 'monte-carlo': + if approximation == "monte-carlo": X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0 d2 = np.sum(np.power(X, 2), axis=1) X = X[d2 < radius0 * radius0, :] @@ -179,7 +195,7 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio cell_inds = np.argmin(sq_distances, axis=1) centers = [] for c in range(num_cells): - bool_c = (cell_inds == c) + bool_c = cell_inds == c num_c = np.sum(bool_c.astype(np.int32)) if num_c > 0: centers.append(np.sum(X[bool_c, :], axis=0) / num_c) @@ -196,28 +212,42 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1))) # Optional fixing - if fixed == 'center': + if fixed == "center": kernel_points[0, :] *= 0 - if fixed == 'verticals': + if fixed == "verticals": kernel_points[0, :] *= 0 kernel_points[:3, :-1] *= 0 if verbose: - print('iter {:5d} / max move = {:f}'.format(iter, np.max(np.linalg.norm(moves, axis=1)))) + print( + "iter {:5d} / max move = {:f}".format( + iter, np.max(np.linalg.norm(moves, axis=1)) + ) + ) if warning: - print('{:}WARNING: at least one point has no cell{:}'.format(bcolors.WARNING, bcolors.ENDC)) + print( + "{:}WARNING: at least one point has no cell{:}".format( + bcolors.WARNING, bcolors.ENDC + ) + ) if verbose > 1: plt.clf() - plt.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0, - marker='.', cmap=plt.get_cmap('tab20')) - #plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0, + plt.scatter( + X[:, 0], + X[:, 1], + c=cell_inds, + s=20.0, + marker=".", + cmap=plt.get_cmap("tab20"), + ) + # plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0, # marker='+', cmap=plt.get_cmap('tab20')) - plt.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+') - circle = plt.Circle((0, 0), radius0, color='r', fill=False) + plt.plot(kernel_points[:, 0], kernel_points[:, 1], "k+") + circle = plt.Circle((0, 0), radius0, color="r", fill=False) fig.axes[0].add_artist(circle) fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1)) fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1)) - fig.axes[0].set_aspect('equal') + fig.axes[0].set_aspect("equal") plt.draw() plt.pause(0.001) 
plt.show(block=False) @@ -231,32 +261,45 @@ def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximatio if dimension == 2: fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8]) ax1.plot(max_moves) - ax2.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0, - marker='.', cmap=plt.get_cmap('tab20')) + ax2.scatter( + X[:, 0], + X[:, 1], + c=cell_inds, + s=20.0, + marker=".", + cmap=plt.get_cmap("tab20"), + ) # plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0, # marker='+', cmap=plt.get_cmap('tab20')) - ax2.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+') - circle = plt.Circle((0, 0), radius0, color='r', fill=False) + ax2.plot(kernel_points[:, 0], kernel_points[:, 1], "k+") + circle = plt.Circle((0, 0), radius0, color="r", fill=False) ax2.add_artist(circle) ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1)) ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1)) - ax2.set_aspect('equal') - plt.title('Check if kernel is correct.') + ax2.set_aspect("equal") + plt.title("Check if kernel is correct.") plt.draw() plt.show() if dimension > 2: plt.figure() plt.plot(max_moves) - plt.title('Check if kernel is correct.') + plt.title("Check if kernel is correct.") plt.show() # Rescale kernels with real radius return kernel_points * radius -def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension=3, - fixed='center', ratio=0.66, verbose=0): +def kernel_point_optimization_debug( + radius, + num_points, + num_kernels=1, + dimension=3, + fixed="center", + ratio=0.66, + verbose=0, +): """ Creation of kernel point via optimization of potentials. :param radius: Radius of the kernels @@ -292,18 +335,25 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension ####################### # Random kernel points - kernel_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0 - while (kernel_points.shape[0] < num_kernels * num_points): - new_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0 + kernel_points = ( + np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0 + ) + while kernel_points.shape[0] < num_kernels * num_points: + new_points = ( + np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 + - radius0 + ) kernel_points = np.vstack((kernel_points, new_points)) d2 = np.sum(np.power(kernel_points, 2), axis=1) kernel_points = kernel_points[d2 < 0.5 * radius0 * radius0, :] - kernel_points = kernel_points[:num_kernels * num_points, :].reshape((num_kernels, num_points, -1)) + kernel_points = kernel_points[: num_kernels * num_points, :].reshape( + (num_kernels, num_points, -1) + ) # Optionnal fixing - if fixed == 'center': + if fixed == "center": kernel_points[:, 0, :] *= 0 - if fixed == 'verticals': + if fixed == "verticals": kernel_points[:, :3, :] *= 0 kernel_points[:, 1, -1] += 2 * radius0 / 3 kernel_points[:, 2, -1] -= 2 * radius0 / 3 @@ -313,14 +363,13 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension ##################### # Initialize figure - if verbose>1: + if verbose > 1: fig = plt.figure() saved_gradient_norms = np.zeros((10000, num_kernels)) old_gradient_norms = np.zeros((num_kernels, num_points)) step = -1 while step < 10000: - # Increment step += 1 @@ -331,16 +380,16 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension A = np.expand_dims(kernel_points, axis=2) B = np.expand_dims(kernel_points, axis=1) interd2 = np.sum(np.power(A - B, 2), 
axis=-1) - inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3/2) + 1e-6) + inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3 / 2) + 1e-6) inter_grads = np.sum(inter_grads, axis=1) # Derivative of the radius potential - circle_grads = 10*kernel_points + circle_grads = 10 * kernel_points # All gradients gradients = inter_grads + circle_grads - if fixed == 'verticals': + if fixed == "verticals": gradients[:, 1:3, :-1] = 0 # Stop condition @@ -352,9 +401,17 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension # Stop if all moving points are gradients fixed (low gradients diff) - if fixed == 'center' and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:])) < thresh: + if ( + fixed == "center" + and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:])) + < thresh + ): break - elif fixed == 'verticals' and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:])) < thresh: + elif ( + fixed == "verticals" + and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:])) + < thresh + ): break elif np.max(np.abs(old_gradient_norms - gradients_norms)) < thresh: break @@ -367,24 +424,32 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension moving_dists = np.minimum(moving_factor * gradients_norms, clip) # Fix central point - if fixed == 'center': + if fixed == "center": moving_dists[:, 0] = 0 - if fixed == 'verticals': + if fixed == "verticals": moving_dists[:, 0] = 0 # Move points - kernel_points -= np.expand_dims(moving_dists, -1) * gradients / np.expand_dims(gradients_norms + 1e-6, -1) + kernel_points -= ( + np.expand_dims(moving_dists, -1) + * gradients + / np.expand_dims(gradients_norms + 1e-6, -1) + ) if verbose: - print('step {:5d} / max grad = {:f}'.format(step, np.max(gradients_norms[:, 3:]))) + print( + "step {:5d} / max grad = {:f}".format( + step, np.max(gradients_norms[:, 3:]) + ) + ) if verbose > 1: plt.clf() - plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], '.') - circle = plt.Circle((0, 0), radius, color='r', fill=False) + plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], ".") + circle = plt.Circle((0, 0), radius, color="r", fill=False) fig.axes[0].add_artist(circle) - fig.axes[0].set_xlim((-radius*1.1, radius*1.1)) - fig.axes[0].set_ylim((-radius*1.1, radius*1.1)) - fig.axes[0].set_aspect('equal') + fig.axes[0].set_xlim((-radius * 1.1, radius * 1.1)) + fig.axes[0].set_ylim((-radius * 1.1, radius * 1.1)) + fig.axes[0].set_aspect("equal") plt.draw() plt.pause(0.001) plt.show(block=False) @@ -395,7 +460,7 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension # Remove unused lines in the saved gradients if step < 10000: - saved_gradient_norms = saved_gradient_norms[:step+1, :] + saved_gradient_norms = saved_gradient_norms[: step + 1, :] # Rescale radius to fit the wanted ratio of radius r = np.sqrt(np.sum(np.power(kernel_points, 2), axis=-1)) @@ -406,9 +471,8 @@ def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False): - # Kernel directory - kernel_dir = 'kernels/dispositions' + kernel_dir = "kernels/dispositions" if not exists(kernel_dir): makedirs(kernel_dir) @@ -417,26 +481,28 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False): lloyd = True # Kernel_file - kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension)) + kernel_file = join( + kernel_dir, 
"k_{:03d}_{:s}_{:d}D.ply".format(num_kpoints, fixed, dimension) + ) # Check if already done if not exists(kernel_file): if lloyd: # Create kernels - kernel_points = spherical_Lloyd(1.0, - num_kpoints, - dimension=dimension, - fixed=fixed, - verbose=0) + kernel_points = spherical_Lloyd( + 1.0, num_kpoints, dimension=dimension, fixed=fixed, verbose=0 + ) else: # Create kernels - kernel_points, grad_norms = kernel_point_optimization_debug(1.0, - num_kpoints, - num_kernels=100, - dimension=dimension, - fixed=fixed, - verbose=0) + kernel_points, grad_norms = kernel_point_optimization_debug( + 1.0, + num_kpoints, + num_kernels=100, + dimension=dimension, + fixed=fixed, + verbose=0, + ) # Find best candidate best_k = np.argmin(grad_norms[-1, :]) @@ -444,23 +510,23 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False): # Save points kernel_points = kernel_points[best_k, :, :] - write_ply(kernel_file, kernel_points, ['x', 'y', 'z']) + write_ply(kernel_file, kernel_points, ["x", "y", "z"]) else: data = read_ply(kernel_file) - kernel_points = np.vstack((data['x'], data['y'], data['z'])).T + kernel_points = np.vstack((data["x"], data["y"], data["z"])).T # Random roations for the kernel # N.B. 4D random rotations not supported yet R = np.eye(dimension) theta = np.random.rand() * 2 * np.pi if dimension == 2: - if fixed != 'vertical': + if fixed != "vertical": c, s = np.cos(theta), np.sin(theta) R = np.array([[c, -s], [s, c]], dtype=np.float32) elif dimension == 3: - if fixed != 'vertical': + if fixed != "vertical": c, s = np.cos(theta), np.sin(theta) R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32) @@ -468,18 +534,24 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False): phi = (np.random.rand() - 0.5) * np.pi # Create the first vector in carthesian coordinates - u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]) + u = np.array( + [np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)] + ) # Choose a random rotation angle alpha = np.random.rand() * 2 * np.pi # Create the rotation matrix with this vector and angle - R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0] + R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[ + 0 + ] R = R.astype(np.float32) # Add a small noise - kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape) + kernel_points = kernel_points + np.random.normal( + scale=0.01, size=kernel_points.shape + ) # Scale kernels kernel_points = radius * kernel_points @@ -487,4 +559,4 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False): # Rotate kernels kernel_points = np.matmul(kernel_points, R) - return kernel_points.astype(np.float32) \ No newline at end of file + return kernel_points.astype(np.float32) diff --git a/models/architectures.py b/models/architectures.py index 4442579..4967e47 100644 --- a/models/architectures.py +++ b/models/architectures.py @@ -19,20 +19,17 @@ import numpy as np def p2p_fitting_regularizer(net): - fitting_loss = 0 repulsive_loss = 0 for m in net.modules(): - if isinstance(m, KPConv) and m.deformable: - ############## # Fitting loss ############## # Get the distance to closest input point and normalize to be independant from layers - KP_min_d2 = m.min_d2 / (m.KP_extent ** 2) + KP_min_d2 = m.min_d2 / (m.KP_extent**2) # Loss will be the square distance to closest input point. 
We use L1 because dist is already squared fitting_loss += net.l1(KP_min_d2, torch.zeros_like(KP_min_d2)) @@ -46,9 +43,15 @@ def p2p_fitting_regularizer(net): # Point should not be close to each other for i in range(net.K): - other_KP = torch.cat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], dim=1).detach() - distances = torch.sqrt(torch.sum((other_KP - KP_locs[:, i:i + 1, :]) ** 2, dim=2)) - rep_loss = torch.sum(torch.clamp_max(distances - net.repulse_extent, max=0.0) ** 2, dim=1) + other_KP = torch.cat( + [KP_locs[:, :i, :], KP_locs[:, i + 1 :, :]], dim=1 + ).detach() + distances = torch.sqrt( + torch.sum((other_KP - KP_locs[:, i : i + 1, :]) ** 2, dim=2) + ) + rep_loss = torch.sum( + torch.clamp_max(distances - net.repulse_extent, max=0.0) ** 2, dim=1 + ) repulsive_loss += net.l1(rep_loss, torch.zeros_like(rep_loss)) / net.K return net.deform_fitting_power * (2 * fitting_loss + repulsive_loss) @@ -79,36 +82,32 @@ class KPCNN(nn.Module): # Loop over consecutive blocks block_in_layer = 0 for block_i, block in enumerate(config.architecture): - # Check equivariance - if ('equivariant' in block) and (not out_dim % 3 == 0): - raise ValueError('Equivariant block but features dimension is not a factor of 3') + if ("equivariant" in block) and (not out_dim % 3 == 0): + raise ValueError( + "Equivariant block but features dimension is not a factor of 3" + ) # Detect upsampling block to stop - if 'upsample' in block: + if "upsample" in block: break # Apply the good block function defining tf ops - self.block_ops.append(block_decider(block, - r, - in_dim, - out_dim, - layer, - config)) - + self.block_ops.append( + block_decider(block, r, in_dim, out_dim, layer, config) + ) # Index of block in this layer block_in_layer += 1 # Update dimension of input from output - if 'simple' in block: + if "simple" in block: in_dim = out_dim // 2 else: in_dim = out_dim - # Detect change to a subsampled layer - if 'pool' in block or 'strided' in block: + if "pool" in block or "strided" in block: # Update radius and feature dimension for next layer layer += 1 r *= 2 @@ -134,7 +133,6 @@ class KPCNN(nn.Module): return def forward(self, batch, config): - # Save all block operations in a list of modules x = batch.features.clone().detach() @@ -160,12 +158,12 @@ class KPCNN(nn.Module): self.output_loss = self.criterion(outputs, labels) # Regularization of deformable offsets - if self.deform_fitting_mode == 'point2point': + if self.deform_fitting_mode == "point2point": self.reg_loss = p2p_fitting_regularizer(self) - elif self.deform_fitting_mode == 'point2plane': - raise ValueError('point2plane fitting mode not implemented yet.') + elif self.deform_fitting_mode == "point2plane": + raise ValueError("point2plane fitting mode not implemented yet.") else: - raise ValueError('Unknown fitting mode: ' + self.deform_fitting_mode) + raise ValueError("Unknown fitting mode: " + self.deform_fitting_mode) # Combined loss return self.output_loss + self.reg_loss @@ -217,36 +215,36 @@ class KPFCNN(nn.Module): # Loop over consecutive blocks for block_i, block in enumerate(config.architecture): - # Check equivariance - if ('equivariant' in block) and (not out_dim % 3 == 0): - raise ValueError('Equivariant block but features dimension is not a factor of 3') + if ("equivariant" in block) and (not out_dim % 3 == 0): + raise ValueError( + "Equivariant block but features dimension is not a factor of 3" + ) # Detect change to next layer for skip connection - if np.any([tmp in block for tmp in ['pool', 'strided', 'upsample', 'global']]): + if np.any( + 
[tmp in block for tmp in ["pool", "strided", "upsample", "global"]] + ): self.encoder_skips.append(block_i) self.encoder_skip_dims.append(in_dim) # Detect upsampling block to stop - if 'upsample' in block: + if "upsample" in block: break # Apply the good block function defining tf ops - self.encoder_blocks.append(block_decider(block, - r, - in_dim, - out_dim, - layer, - config)) + self.encoder_blocks.append( + block_decider(block, r, in_dim, out_dim, layer, config) + ) # Update dimension of input from output - if 'simple' in block: + if "simple" in block: in_dim = out_dim // 2 else: in_dim = out_dim # Detect change to a subsampled layer - if 'pool' in block or 'strided' in block: + if "pool" in block or "strided" in block: # Update radius and feature dimension for next layer layer += 1 r *= 2 @@ -263,38 +261,36 @@ class KPFCNN(nn.Module): # Find first upsampling block start_i = 0 for block_i, block in enumerate(config.architecture): - if 'upsample' in block: + if "upsample" in block: start_i = block_i break # Loop over consecutive blocks for block_i, block in enumerate(config.architecture[start_i:]): - # Add dimension of skip connection concat - if block_i > 0 and 'upsample' in config.architecture[start_i + block_i - 1]: + if block_i > 0 and "upsample" in config.architecture[start_i + block_i - 1]: in_dim += self.encoder_skip_dims[layer] self.decoder_concats.append(block_i) # Apply the good block function defining tf ops - self.decoder_blocks.append(block_decider(block, - r, - in_dim, - out_dim, - layer, - config)) + self.decoder_blocks.append( + block_decider(block, r, in_dim, out_dim, layer, config) + ) # Update dimension of input from output in_dim = out_dim # Detect change to a subsampled layer - if 'upsample' in block: + if "upsample" in block: # Update radius and feature dimension for next layer layer -= 1 r *= 0.5 out_dim = out_dim // 2 self.head_mlp = UnaryBlock(out_dim, config.first_features_dim, False, 0) - self.head_softmax = UnaryBlock(config.first_features_dim, self.C, False, 0, no_relu=True) + self.head_softmax = UnaryBlock( + config.first_features_dim, self.C, False, 0, no_relu=True + ) ################ # Network Losses @@ -320,7 +316,6 @@ class KPFCNN(nn.Module): return def forward(self, batch, config): - # Get input features x = batch.features.clone().detach() @@ -351,7 +346,7 @@ class KPFCNN(nn.Module): """ # Set all ignored labels to -1 and correct the other label to be in [0, C-1] range - target = - torch.ones_like(labels) + target = -torch.ones_like(labels) for i, c in enumerate(self.valid_labels): target[labels == c] = i @@ -364,12 +359,12 @@ class KPFCNN(nn.Module): self.output_loss = self.criterion(outputs, target) # Regularization of deformable offsets - if self.deform_fitting_mode == 'point2point': + if self.deform_fitting_mode == "point2point": self.reg_loss = p2p_fitting_regularizer(self) - elif self.deform_fitting_mode == 'point2plane': - raise ValueError('point2plane fitting mode not implemented yet.') + elif self.deform_fitting_mode == "point2plane": + raise ValueError("point2plane fitting mode not implemented yet.") else: - raise ValueError('Unknown fitting mode: ' + self.deform_fitting_mode) + raise ValueError("Unknown fitting mode: " + self.deform_fitting_mode) # Combined loss return self.output_loss + self.reg_loss @@ -383,7 +378,7 @@ class KPFCNN(nn.Module): """ # Set all ignored labels to -1 and correct the other label to be in [0, C-1] range - target = - torch.ones_like(labels) + target = -torch.ones_like(labels) for i, c in 
enumerate(self.valid_labels): target[labels == c] = i @@ -392,24 +387,3 @@ class KPFCNN(nn.Module): correct = (predicted == target).sum().item() return correct / total - - - - - - - - - - - - - - - - - - - - - diff --git a/models/blocks.py b/models/blocks.py index 86b04a3..291582c 100644 --- a/models/blocks.py +++ b/models/blocks.py @@ -15,7 +15,6 @@ # -import time import math import torch import torch.nn as nn @@ -23,7 +22,6 @@ from torch.nn.parameter import Parameter from torch.nn.init import kaiming_uniform_ from kernels.kernel_points import load_kernels -from utils.ply import write_ply # ---------------------------------------------------------------------------------------------------------------------- # @@ -51,19 +49,19 @@ def gather(x, idx, method=2): return x.gather(0, idx) elif method == 2: for i, ni in enumerate(idx.size()[1:]): - x = x.unsqueeze(i+1) + x = x.unsqueeze(i + 1) new_s = list(x.size()) - new_s[i+1] = ni + new_s[i + 1] = ni x = x.expand(new_s) n = len(idx.size()) for i, di in enumerate(x.size()[n:]): - idx = idx.unsqueeze(i+n) + idx = idx.unsqueeze(i + n) new_s = list(idx.size()) - new_s[i+n] = di + new_s[i + n] = di idx = idx.expand(new_s) return x.gather(0, idx) else: - raise ValueError('Unkown method') + raise ValueError("Unkown method") def radius_gaussian(sq_r, sig, eps=1e-9): @@ -122,9 +120,8 @@ def global_average(x, batch_lengths): averaged_features = [] i0 = 0 for b_i, length in enumerate(batch_lengths): - # Average features for each batch cloud - averaged_features.append(torch.mean(x[i0:i0 + length], dim=0)) + averaged_features.append(torch.mean(x[i0 : i0 + length], dim=0)) # Increment for next cloud i0 += length @@ -141,10 +138,20 @@ def global_average(x, batch_lengths): class KPConv(nn.Module): - - def __init__(self, kernel_size, p_dim, in_channels, out_channels, KP_extent, radius, - fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum', - deformable=False, modulated=False): + def __init__( + self, + kernel_size, + p_dim, + in_channels, + out_channels, + KP_extent, + radius, + fixed_kernel_points="center", + KP_influence="linear", + aggregation_mode="sum", + deformable=False, + modulated=False, + ): """ Initialize parameters for KPConvDeformable. :param kernel_size: Number of kernel points. 
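Reviewer note (not part of the patch): for orientation while reading the KPConv hunks, a minimal usage sketch based on the reformatted signature above. The import path matches models/blocks.py; every shape and numeric value below is an illustrative assumption, not something taken from the diff.

import torch
from models.blocks import KPConv

# Assumed shapes: 128 query points, 256 support points, 40 neighbors each, 64 input features.
q_pts = torch.rand(128, 3)                        # query point coordinates
s_pts = torch.rand(256, 3)                        # support point coordinates
neighb_inds = torch.randint(0, 256, (128, 40))    # neighbor indices into s_pts
x = torch.rand(256, 64)                           # features carried by the support points

conv = KPConv(kernel_size=15, p_dim=3, in_channels=64, out_channels=128,
              KP_extent=0.06, radius=0.05)        # extent/radius values are placeholders
out = conv(q_pts, s_pts, neighb_inds, x)          # -> (128, 128) output features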
@@ -180,8 +187,10 @@ class KPConv(nn.Module): self.offset_features = None # Initialize weights - self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32), - requires_grad=True) + self.weights = Parameter( + torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32), + requires_grad=True, + ) # Initiate weights for offsets if deformable: @@ -189,16 +198,20 @@ class KPConv(nn.Module): self.offset_dim = (self.p_dim + 1) * self.K else: self.offset_dim = self.p_dim * self.K - self.offset_conv = KPConv(self.K, - self.p_dim, - self.in_channels, - self.offset_dim, - KP_extent, - radius, - fixed_kernel_points=fixed_kernel_points, - KP_influence=KP_influence, - aggregation_mode=aggregation_mode) - self.offset_bias = Parameter(torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True) + self.offset_conv = KPConv( + self.K, + self.p_dim, + self.in_channels, + self.offset_dim, + KP_extent, + radius, + fixed_kernel_points=fixed_kernel_points, + KP_influence=KP_influence, + aggregation_mode=aggregation_mode, + ) + self.offset_bias = Parameter( + torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True + ) else: self.offset_dim = None @@ -226,36 +239,36 @@ class KPConv(nn.Module): """ # Create one kernel disposition (as numpy array). Choose the KP distance to center thanks to the KP extent - K_points_numpy = load_kernels(self.radius, - self.K, - dimension=self.p_dim, - fixed=self.fixed_kernel_points) + K_points_numpy = load_kernels( + self.radius, self.K, dimension=self.p_dim, fixed=self.fixed_kernel_points + ) - return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32), - requires_grad=False) + return Parameter( + torch.tensor(K_points_numpy, dtype=torch.float32), requires_grad=False + ) def forward(self, q_pts, s_pts, neighb_inds, x): - ################### # Offset generation ################### if self.deformable: - # Get offsets with a KPConv that only takes part of the features - self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias + self.offset_features = ( + self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias + ) if self.modulated: - # Get offset (in normalized scale) from features - unscaled_offsets = self.offset_features[:, :self.p_dim * self.K] + unscaled_offsets = self.offset_features[:, : self.p_dim * self.K] unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim) # Get modulations - modulations = 2 * torch.sigmoid(self.offset_features[:, self.p_dim * self.K:]) + modulations = 2 * torch.sigmoid( + self.offset_features[:, self.p_dim * self.K :] + ) else: - # Get offset (in normalized scale) from features unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim) @@ -294,22 +307,25 @@ class KPConv(nn.Module): differences = neighbors - deformed_K_points # Get the square distances [n_points, n_neighbors, n_kpoints] - sq_distances = torch.sum(differences ** 2, dim=3) + sq_distances = torch.sum(differences**2, dim=3) # Optimization by ignoring points outside a deformed KP range if self.deformable: - # Save distances for loss self.min_d2, _ = torch.min(sq_distances, dim=1) # Boolean of the neighbors in range of a kernel point [n_points, n_neighbors] - in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32) + in_range = torch.any(sq_distances < self.KP_extent**2, dim=2).type( + torch.int32 + ) # New value of max neighbors new_max_neighb = torch.max(torch.sum(in_range, dim=1)) # For each row of neighbors, indices of the ones 
that are in range [n_points, new_max_neighb] - neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1) + neighb_row_bool, neighb_row_inds = torch.topk( + in_range, new_max_neighb.item(), dim=1 + ) # Gather new neighbor indices [n_points, new_max_neighb] new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False) @@ -321,35 +337,41 @@ class KPConv(nn.Module): # New shadow neighbors have to point to the last shadow point new_neighb_inds *= neighb_row_bool - new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1) + new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int( + s_pts.shape[0] - 1 + ) else: new_neighb_inds = neighb_inds # Get Kernel point influences [n_points, n_kpoints, n_neighbors] - if self.KP_influence == 'constant': + if self.KP_influence == "constant": # Every point get an influence of 1. all_weights = torch.ones_like(sq_distances) all_weights = torch.transpose(all_weights, 1, 2) - elif self.KP_influence == 'linear': + elif self.KP_influence == "linear": # Influence decrease linearly with the distance, and get to zero when d = KP_extent. - all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0) + all_weights = torch.clamp( + 1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0 + ) all_weights = torch.transpose(all_weights, 1, 2) - elif self.KP_influence == 'gaussian': + elif self.KP_influence == "gaussian": # Influence in gaussian of the distance. sigma = self.KP_extent * 0.3 all_weights = radius_gaussian(sq_distances, sigma) all_weights = torch.transpose(all_weights, 1, 2) else: - raise ValueError('Unknown influence function type (config.KP_influence)') + raise ValueError("Unknown influence function type (config.KP_influence)") # In case of closest mode, only the closest KP can influence each point - if self.aggregation_mode == 'closest': + if self.aggregation_mode == "closest": neighbors_1nn = torch.argmin(sq_distances, dim=2) - all_weights *= torch.transpose(nn.functional.one_hot(neighbors_1nn, self.K), 1, 2) + all_weights *= torch.transpose( + nn.functional.one_hot(neighbors_1nn, self.K), 1, 2 + ) - elif self.aggregation_mode != 'sum': + elif self.aggregation_mode != "sum": raise ValueError("Unknown convolution mode. 
Should be 'closest' or 'sum'") # Add a zero feature for shadow neighbors @@ -373,9 +395,10 @@ class KPConv(nn.Module): return torch.sum(kernel_outputs, dim=0) def __repr__(self): - return 'KPConv(radius: {:.2f}, in_feat: {:d}, out_feat: {:d})'.format(self.radius, - self.in_channels, - self.out_channels) + return "KPConv(radius: {:.2f}, in_feat: {:d}, out_feat: {:d})".format( + self.radius, self.in_channels, self.out_channels + ) + # ---------------------------------------------------------------------------------------------------------------------- # @@ -383,51 +406,55 @@ class KPConv(nn.Module): # \********************/ # -def block_decider(block_name, - radius, - in_dim, - out_dim, - layer_ind, - config): - if block_name == 'unary': - return UnaryBlock(in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum) +def block_decider(block_name, radius, in_dim, out_dim, layer_ind, config): + if block_name == "unary": + return UnaryBlock( + in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum + ) - elif block_name in ['simple', - 'simple_deformable', - 'simple_invariant', - 'simple_equivariant', - 'simple_strided', - 'simple_deformable_strided', - 'simple_invariant_strided', - 'simple_equivariant_strided']: + elif block_name in [ + "simple", + "simple_deformable", + "simple_invariant", + "simple_equivariant", + "simple_strided", + "simple_deformable_strided", + "simple_invariant_strided", + "simple_equivariant_strided", + ]: return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config) - elif block_name in ['resnetb', - 'resnetb_invariant', - 'resnetb_equivariant', - 'resnetb_deformable', - 'resnetb_strided', - 'resnetb_deformable_strided', - 'resnetb_equivariant_strided', - 'resnetb_invariant_strided']: - return ResnetBottleneckBlock(block_name, in_dim, out_dim, radius, layer_ind, config) + elif block_name in [ + "resnetb", + "resnetb_invariant", + "resnetb_equivariant", + "resnetb_deformable", + "resnetb_strided", + "resnetb_deformable_strided", + "resnetb_equivariant_strided", + "resnetb_invariant_strided", + ]: + return ResnetBottleneckBlock( + block_name, in_dim, out_dim, radius, layer_ind, config + ) - elif block_name == 'max_pool' or block_name == 'max_pool_wide': + elif block_name == "max_pool" or block_name == "max_pool_wide": return MaxPoolBlock(layer_ind) - elif block_name == 'global_average': + elif block_name == "global_average": return GlobalAverageBlock() - elif block_name == 'nearest_upsample': + elif block_name == "nearest_upsample": return NearestUpsampleBlock(layer_ind) else: - raise ValueError('Unknown block name in the architecture definition : ' + block_name) + raise ValueError( + "Unknown block name in the architecture definition : " + block_name + ) class BatchNormBlock(nn.Module): - def __init__(self, in_dim, use_bn, bn_momentum): """ Initialize a batch normalization block. If network does not use batch normalization, replace with biases. 
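Reviewer note (not part of the patch): the BatchNormBlock forward in the next hunk applies nn.BatchNorm1d to a stacked point cloud by reshaping the (N, C) feature matrix to (1, C, N), i.e. the whole stack is treated as a single batch element. A standalone sketch of that reshape, with assumed sizes and momentum:

import torch
import torch.nn as nn

feats = torch.rand(1000, 64)                    # (N stacked points, C features), no batch axis
bn = nn.BatchNorm1d(64, momentum=0.98)          # momentum value is an assumption
out = bn(feats.unsqueeze(2).transpose(0, 2))    # (1, C, N): statistics over all N points
out = out.transpose(0, 2).squeeze()             # back to (N, C)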
@@ -441,9 +468,11 @@ class BatchNormBlock(nn.Module): self.in_dim = in_dim if self.use_bn: self.batch_norm = nn.BatchNorm1d(in_dim, momentum=bn_momentum) - #self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum) + # self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum) else: - self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True) + self.bias = Parameter( + torch.zeros(in_dim, dtype=torch.float32), requires_grad=True + ) return def reset_parameters(self): @@ -451,7 +480,6 @@ class BatchNormBlock(nn.Module): def forward(self, x): if self.use_bn: - x = x.unsqueeze(2) x = x.transpose(0, 2) x = self.batch_norm(x) @@ -461,13 +489,14 @@ class BatchNormBlock(nn.Module): return x + self.bias def __repr__(self): - return 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'.format(self.in_dim, - self.bn_momentum, - str(not self.use_bn)) + return ( + "BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})".format( + self.in_dim, self.bn_momentum, str(not self.use_bn) + ) + ) class UnaryBlock(nn.Module): - def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False): """ Initialize a standard unary block with its ReLU and BatchNorm. @@ -497,14 +526,12 @@ class UnaryBlock(nn.Module): return x def __repr__(self): - return 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'.format(self.in_dim, - self.out_dim, - str(self.use_bn), - str(not self.no_relu)) + return "UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})".format( + self.in_dim, self.out_dim, str(self.use_bn), str(not self.no_relu) + ) class SimpleBlock(nn.Module): - def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config): """ Initialize a simple convolution block with its ReLU and BatchNorm. @@ -527,17 +554,19 @@ class SimpleBlock(nn.Module): self.out_dim = out_dim # Define the KPConv class - self.KPConv = KPConv(config.num_kernel_points, - config.in_points_dim, - in_dim, - out_dim // 2, - current_extent, - radius, - fixed_kernel_points=config.fixed_kernel_points, - KP_influence=config.KP_influence, - aggregation_mode=config.aggregation_mode, - deformable='deform' in block_name, - modulated=config.modulated) + self.KPConv = KPConv( + config.num_kernel_points, + config.in_points_dim, + in_dim, + out_dim // 2, + current_extent, + radius, + fixed_kernel_points=config.fixed_kernel_points, + KP_influence=config.KP_influence, + aggregation_mode=config.aggregation_mode, + deformable="deform" in block_name, + modulated=config.modulated, + ) # Other opperations self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum) @@ -546,8 +575,7 @@ class SimpleBlock(nn.Module): return def forward(self, x, batch): - - if 'strided' in self.block_name: + if "strided" in self.block_name: q_pts = batch.points[self.layer_ind + 1] s_pts = batch.points[self.layer_ind] neighb_inds = batch.pools[self.layer_ind] @@ -561,7 +589,6 @@ class SimpleBlock(nn.Module): class ResnetBottleneckBlock(nn.Module): - def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config): """ Initialize a resnet bottleneck block. 
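Reviewer note (not part of the patch): the hunk that follows only rewraps ResnetBottleneckBlock, so for orientation, here is the bottleneck dataflow it preserves, read off the constructor (the residual sum at the end follows the original forward, which is not shown in this diff):

# features: (N, in_dim)
#   unary1                    -> (N, out_dim // 4)   (nn.Identity when in_dim == out_dim // 4)
#   KPConv + batch_norm_conv  -> (N, out_dim // 4)
#   unary2                    -> (N, out_dim)
# shortcut: features, max-pooled first for 'strided' blocks,
#           then unary_shortcut when in_dim != out_dim, else identity
# output:   main branch + shortcut, passed through the block's activation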
@@ -585,30 +612,40 @@ class ResnetBottleneckBlock(nn.Module): # First downscaling mlp if in_dim != out_dim // 4: - self.unary1 = UnaryBlock(in_dim, out_dim // 4, self.use_bn, self.bn_momentum) + self.unary1 = UnaryBlock( + in_dim, out_dim // 4, self.use_bn, self.bn_momentum + ) else: self.unary1 = nn.Identity() # KPConv block - self.KPConv = KPConv(config.num_kernel_points, - config.in_points_dim, - out_dim // 4, - out_dim // 4, - current_extent, - radius, - fixed_kernel_points=config.fixed_kernel_points, - KP_influence=config.KP_influence, - aggregation_mode=config.aggregation_mode, - deformable='deform' in block_name, - modulated=config.modulated) - self.batch_norm_conv = BatchNormBlock(out_dim // 4, self.use_bn, self.bn_momentum) + self.KPConv = KPConv( + config.num_kernel_points, + config.in_points_dim, + out_dim // 4, + out_dim // 4, + current_extent, + radius, + fixed_kernel_points=config.fixed_kernel_points, + KP_influence=config.KP_influence, + aggregation_mode=config.aggregation_mode, + deformable="deform" in block_name, + modulated=config.modulated, + ) + self.batch_norm_conv = BatchNormBlock( + out_dim // 4, self.use_bn, self.bn_momentum + ) # Second upscaling mlp - self.unary2 = UnaryBlock(out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True) + self.unary2 = UnaryBlock( + out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True + ) # Shortcut optional mpl if in_dim != out_dim: - self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True) + self.unary_shortcut = UnaryBlock( + in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True + ) else: self.unary_shortcut = nn.Identity() @@ -618,8 +655,7 @@ class ResnetBottleneckBlock(nn.Module): return def forward(self, features, batch): - - if 'strided' in self.block_name: + if "strided" in self.block_name: q_pts = batch.points[self.layer_ind + 1] s_pts = batch.points[self.layer_ind] neighb_inds = batch.pools[self.layer_ind] @@ -639,7 +675,7 @@ class ResnetBottleneckBlock(nn.Module): x = self.unary2(x) # Shortcut - if 'strided' in self.block_name: + if "strided" in self.block_name: shortcut = max_pool(features, neighb_inds) else: shortcut = features @@ -649,7 +685,6 @@ class ResnetBottleneckBlock(nn.Module): class GlobalAverageBlock(nn.Module): - def __init__(self): """ Initialize a global average block with its ReLU and BatchNorm. @@ -662,7 +697,6 @@ class GlobalAverageBlock(nn.Module): class NearestUpsampleBlock(nn.Module): - def __init__(self, layer_ind): """ Initialize a nearest upsampling block with its ReLU and BatchNorm. @@ -675,12 +709,12 @@ class NearestUpsampleBlock(nn.Module): return closest_pool(x, batch.upsamples[self.layer_ind - 1]) def __repr__(self): - return 'NearestUpsampleBlock(layer: {:d} -> {:d})'.format(self.layer_ind, - self.layer_ind - 1) + return "NearestUpsampleBlock(layer: {:d} -> {:d})".format( + self.layer_ind, self.layer_ind - 1 + ) class MaxPoolBlock(nn.Module): - def __init__(self, layer_ind): """ Initialize a max pooling block with its ReLU and BatchNorm. 
@@ -691,4 +725,3 @@ class MaxPoolBlock(nn.Module): def forward(self, x, batch): return max_pool(x, batch.pools[self.layer_ind + 1]) - diff --git a/plot_convergence.py b/plot_convergence.py index ee27e7f..f8f40b7 100644 --- a/plot_convergence.py +++ b/plot_convergence.py @@ -22,14 +22,11 @@ # # Common libs -import os import torch import numpy as np import matplotlib.pyplot as plt from os.path import isfile, join, exists -from os import listdir, remove, getcwd -from sklearn.metrics import confusion_matrix -import time +from os import listdir, remove # My libs from utils.config import Config @@ -37,7 +34,6 @@ from utils.metrics import IoU_from_confusions, smooth_metrics, fast_confusion from utils.ply import read_ply # Datasets -from datasetss.ModelNet40 import ModelNet40Dataset from datasetss.S3DIS import S3DISDataset from datasetss.SemanticKitti import SemanticKittiDataset @@ -47,8 +43,8 @@ from datasetss.SemanticKitti import SemanticKittiDataset # \***********************/ # -def listdir_str(path): +def listdir_str(path): # listdir can return binary string instead od decoded string sometimes. # This function ensures a steady behavior @@ -63,41 +59,39 @@ def listdir_str(path): return f_list - def running_mean(signal, n, axis=0, stride=1): signal = np.array(signal) - torch_conv = torch.nn.Conv1d(1, 1, kernel_size=2*n+1, stride=stride, bias=False) + torch_conv = torch.nn.Conv1d(1, 1, kernel_size=2 * n + 1, stride=stride, bias=False) torch_conv.weight.requires_grad_(False) torch_conv.weight *= 0 - torch_conv.weight += 1 / (2*n+1) + torch_conv.weight += 1 / (2 * n + 1) if signal.ndim == 1: torch_signal = torch.from_numpy(signal.reshape([1, 1, -1]).astype(np.float32)) return torch_conv(torch_signal).squeeze().numpy() elif signal.ndim == 2: - print('TODO implement with torch and stride here') + print("TODO implement with torch and stride here") smoothed = np.empty(signal.shape) if axis == 0: for i, sig in enumerate(signal): - sig_sum = np.convolve(sig, np.ones((2*n+1,)), mode='same') - sig_num = np.convolve(sig*0+1, np.ones((2*n+1,)), mode='same') + sig_sum = np.convolve(sig, np.ones((2 * n + 1,)), mode="same") + sig_num = np.convolve(sig * 0 + 1, np.ones((2 * n + 1,)), mode="same") smoothed[i, :] = sig_sum / sig_num elif axis == 1: for i, sig in enumerate(signal.T): - sig_sum = np.convolve(sig, np.ones((2*n+1,)), mode='same') - sig_num = np.convolve(sig*0+1, np.ones((2*n+1,)), mode='same') + sig_sum = np.convolve(sig, np.ones((2 * n + 1,)), mode="same") + sig_num = np.convolve(sig * 0 + 1, np.ones((2 * n + 1,)), mode="same") smoothed[:, i] = sig_sum / sig_num else: - print('wrong axis') + print("wrong axis") return smoothed else: - print('wrong dimensions') + print("wrong dimensions") return None def IoU_class_metrics(all_IoUs, smooth_n): - # Get mean IoU per class for consecutive epochs to directly get a mean without further smoothing smoothed_IoUs = [] for epoch in range(len(all_IoUs)): @@ -111,8 +105,7 @@ def IoU_class_metrics(all_IoUs, smooth_n): def load_confusions(filename, n_class): - - with open(filename, 'r') as f: + with open(filename, "r") as f: lines = f.readlines() confs = np.zeros((len(lines), n_class, n_class)) @@ -124,9 +117,8 @@ def load_confusions(filename, n_class): def load_training_results(path): - - filename = join(path, 'training.txt') - with open(filename, 'r') as f: + filename = join(path, "training.txt") + with open(filename, "r") as f: lines = f.readlines() epochs = [] @@ -137,7 +129,7 @@ def load_training_results(path): t = [] for line in lines[1:]: line_info = 
line.split() - if (len(line) > 0): + if len(line) > 0: epochs += [int(line_info[0])] steps += [int(line_info[1])] L_out += [float(line_info[2])] @@ -151,8 +143,7 @@ def load_training_results(path): def load_single_IoU(filename, n_parts): - - with open(filename, 'r') as f: + with open(filename, "r") as f: lines = f.readlines() # Load all IoUs @@ -163,37 +154,42 @@ def load_single_IoU(filename, n_parts): def load_snap_clouds(path, dataset, only_last=False): - - cloud_folders = np.array([join(path, f) for f in listdir_str(path) if f.startswith('val_preds')]) - cloud_epochs = np.array([int(f.split('_')[-1]) for f in cloud_folders]) + cloud_folders = np.array( + [join(path, f) for f in listdir_str(path) if f.startswith("val_preds")] + ) + cloud_epochs = np.array([int(f.split("_")[-1]) for f in cloud_folders]) epoch_order = np.argsort(cloud_epochs) cloud_epochs = cloud_epochs[epoch_order] cloud_folders = cloud_folders[epoch_order] - Confs = np.zeros((len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32) + Confs = np.zeros( + (len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32 + ) for c_i, cloud_folder in enumerate(cloud_folders): if only_last and c_i < len(cloud_epochs) - 1: continue # Load confusion if previously saved - conf_file = join(cloud_folder, 'conf.txt') + conf_file = join(cloud_folder, "conf.txt") if isfile(conf_file): Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32) else: for f in listdir_str(cloud_folder): - if f.endswith('.ply') and not f.endswith('sub.ply'): + if f.endswith(".ply") and not f.endswith("sub.ply"): data = read_ply(join(cloud_folder, f)) - labels = data['class'] - preds = data['preds'] - Confs[c_i] += fast_confusion(labels, preds, dataset.label_values).astype(np.int32) + labels = data["class"] + preds = data["preds"] + Confs[c_i] += fast_confusion( + labels, preds, dataset.label_values + ).astype(np.int32) - np.savetxt(conf_file, Confs[c_i], '%12d') + np.savetxt(conf_file, Confs[c_i], "%12d") # Erase ply to save disk memory if c_i < len(cloud_folders) - 1: for f in listdir_str(cloud_folder): - if f.endswith('.ply'): + if f.endswith(".ply"): remove(join(cloud_folder, f)) # Remove ignored labels from confusions @@ -213,7 +209,6 @@ def load_snap_clouds(path, dataset, only_last=False): def compare_trainings(list_of_paths, list_of_labels=None): - # Parameters # ********** @@ -231,13 +226,13 @@ def compare_trainings(list_of_paths, list_of_labels=None): all_loss = [] all_lr = [] all_times = [] - all_RAMs = [] for path in list_of_paths: - print(path) - if ('val_IoUs.txt' in [f for f in listdir_str(path)]) or ('val_confs.txt' in [f for f in listdir_str(path)]): + if ("val_IoUs.txt" in [f for f in listdir_str(path)]) or ( + "val_confs.txt" in [f for f in listdir_str(path)] + ): config = Config() config.load(path) else: @@ -278,59 +273,58 @@ def compare_trainings(list_of_paths, list_of_labels=None): # Plots learning rate # ******************* - if plot_lr: # Figure - fig = plt.figure('lr') + fig = plt.figure("lr") for i, label in enumerate(list_of_labels): plt.plot(all_epochs[i], all_lr[i], linewidth=1, label=label) # Set names for axes - plt.xlabel('epochs') - plt.ylabel('lr') - plt.yscale('log') + plt.xlabel("epochs") + plt.ylabel("lr") + plt.yscale("log") # Display legends and title plt.legend(loc=1) # Customize the graph ax = fig.gca() - ax.grid(linestyle='-.', which='both') + ax.grid(linestyle="-.", which="both") # ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # Plots loss # ********** # Figure - fig = plt.figure('loss') + 
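# Aside (illustrative sketch, not part of the patch): load_snap_clouds accumulates
# per-epoch confusion matrices and hands them to IoU_from_confusions. The usual
# per-class IoU computed from such a matrix is TP / (TP + FP + FN); the repo's
# implementation may additionally drop ignored labels. Example matrix is made up.
import numpy as np


def iou_from_confusion(C):
    """Per-class IoU from an (n_class, n_class) confusion matrix (rows = truth, cols = predictions)."""
    C = np.asarray(C, dtype=np.float64)
    tp = np.diag(C)
    fp = C.sum(axis=0) - tp
    fn = C.sum(axis=1) - tp
    return tp / np.maximum(tp + fp + fn, 1e-9)


C = np.array([[50, 2, 0],
              [3, 40, 5],
              [0, 1, 20]])
print(iou_from_confusion(C), iou_from_confusion(C).mean())  # per-class IoUs and their mean (mIoU)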
fig = plt.figure("loss") for i, label in enumerate(list_of_labels): plt.plot(all_epochs[i], all_loss[i], linewidth=1, label=label) # Set names for axes - plt.xlabel('epochs') - plt.ylabel('loss') - plt.yscale('log') + plt.xlabel("epochs") + plt.ylabel("loss") + plt.yscale("log") # Display legends and title plt.legend(loc=1) - plt.title('Losses compare') + plt.title("Losses compare") # Customize the graph ax = fig.gca() - ax.grid(linestyle='-.', which='both') + ax.grid(linestyle="-.", which="both") # ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # Plot Times # ********** # Figure - fig = plt.figure('time') + fig = plt.figure("time") for i, label in enumerate(list_of_labels): plt.plot(all_epochs[i], np.array(all_times[i]) / 3600, linewidth=1, label=label) # Set names for axes - plt.xlabel('epochs') - plt.ylabel('time') + plt.xlabel("epochs") + plt.ylabel("time") # plt.yscale('log') # Display legends and title @@ -338,7 +332,7 @@ def compare_trainings(list_of_paths, list_of_labels=None): # Customize the graph ax = fig.gca() - ax.grid(linestyle='-.', which='both') + ax.grid(linestyle="-.", which="both") # ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # Show all @@ -346,7 +340,6 @@ def compare_trainings(list_of_paths, list_of_labels=None): def compare_convergences_segment(dataset, list_of_paths, list_of_names=None): - # Parameters # ********** @@ -368,18 +361,20 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None): config = Config() config.load(list_of_paths[0]) - class_list = [dataset.label_to_names[label] for label in dataset.label_values - if label not in dataset.ignored_labels] + class_list = [ + dataset.label_to_names[label] + for label in dataset.label_values + if label not in dataset.ignored_labels + ] - s = '{:^10}|'.format('mean') + s = "{:^10}|".format("mean") for c in class_list: - s += '{:^10}'.format(c) + s += "{:^10}".format(c) print(s) - print(10*'-' + '|' + 10*config.num_classes*'-') + print(10 * "-" + "|" + 10 * config.num_classes * "-") for path in list_of_paths: - # Get validation IoUs - file = join(path, 'val_IoUs.txt') + file = join(path, "val_IoUs.txt") val_IoUs = load_single_IoU(file, config.num_classes) # Get mean IoU @@ -390,9 +385,9 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None): all_mIoUs += [mIoUs] all_class_IoUs += [class_IoUs] - s = '{:^10.1f}|'.format(100*mIoUs[-1]) + s = "{:^10.1f}|".format(100 * mIoUs[-1]) for IoU in class_IoUs[-1]: - s += '{:^10.1f}'.format(100*IoU) + s += "{:^10.1f}".format(100 * IoU) print(s) # Get optional full validation on clouds @@ -400,73 +395,80 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None): all_snap_epochs += [snap_epochs] all_snap_IoUs += [snap_IoUs] - print(10*'-' + '|' + 10*config.num_classes*'-') + print(10 * "-" + "|" + 10 * config.num_classes * "-") for snap_IoUs in all_snap_IoUs: if len(snap_IoUs) > 0: - s = '{:^10.1f}|'.format(100*np.mean(snap_IoUs[-1])) + s = "{:^10.1f}|".format(100 * np.mean(snap_IoUs[-1])) for IoU in snap_IoUs[-1]: - s += '{:^10.1f}'.format(100*IoU) + s += "{:^10.1f}".format(100 * IoU) else: - s = '{:^10s}'.format('-') + s = "{:^10s}".format("-") for _ in range(config.num_classes): - s += '{:^10s}'.format('-') + s += "{:^10s}".format("-") print(s) # Plots # ***** # Figure - fig = plt.figure('mIoUs') + fig = plt.figure("mIoUs") for i, name in enumerate(list_of_names): - p = plt.plot(all_pred_epochs[i], all_mIoUs[i], '--', linewidth=1, label=name) - plt.plot(all_snap_epochs[i], np.mean(all_snap_IoUs[i], axis=1), linewidth=1, 
color=p[-1].get_color()) - plt.xlabel('epochs') - plt.ylabel('IoU') + p = plt.plot(all_pred_epochs[i], all_mIoUs[i], "--", linewidth=1, label=name) + plt.plot( + all_snap_epochs[i], + np.mean(all_snap_IoUs[i], axis=1), + linewidth=1, + color=p[-1].get_color(), + ) + plt.xlabel("epochs") + plt.ylabel("IoU") # Set limits for y axis - #plt.ylim(0.55, 0.95) + # plt.ylim(0.55, 0.95) # Display legends and title plt.legend(loc=4) # Customize the graph ax = fig.gca() - ax.grid(linestyle='-.', which='both') - #ax.set_yticks(np.arange(0.8, 1.02, 0.02)) + ax.grid(linestyle="-.", which="both") + # ax.set_yticks(np.arange(0.8, 1.02, 0.02)) displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7] displayed_classes = [] for c_i, c_name in enumerate(class_list): if c_i in displayed_classes: - # Figure - fig = plt.figure(c_name + ' IoU') + fig = plt.figure(c_name + " IoU") for i, name in enumerate(list_of_names): - plt.plot(all_pred_epochs[i], all_class_IoUs[i][:, c_i], linewidth=1, label=name) - plt.xlabel('epochs') - plt.ylabel('IoU') + plt.plot( + all_pred_epochs[i], + all_class_IoUs[i][:, c_i], + linewidth=1, + label=name, + ) + plt.xlabel("epochs") + plt.ylabel("IoU") # Set limits for y axis - #plt.ylim(0.8, 1) + # plt.ylim(0.8, 1) # Display legends and title plt.legend(loc=4) # Customize the graph ax = fig.gca() - ax.grid(linestyle='-.', which='both') - #ax.set_yticks(np.arange(0.8, 1.02, 0.02)) + ax.grid(linestyle="-.", which="both") + # ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # Show all plt.show() def compare_convergences_classif(list_of_paths, list_of_labels=None): - # Parameters # ********** - steps_per_epoch = 0 smooth_n = 12 if list_of_labels is None: @@ -477,13 +479,10 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None): all_pred_epochs = [] all_val_OA = [] - all_train_OA = [] all_vote_OA = [] all_vote_confs = [] - for path in list_of_paths: - # Load parameters config = Config() config.load(list_of_paths[0]) @@ -496,21 +495,31 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None): first_e = np.min(epochs) # Get validation confusions - file = join(path, 'val_confs.txt') + file = join(path, "val_confs.txt") val_C1 = load_confusions(file, n_class) - val_PRE, val_REC, val_F1, val_IoU, val_ACC = smooth_metrics(val_C1, smooth_n=smooth_n) + val_PRE, val_REC, val_F1, val_IoU, val_ACC = smooth_metrics( + val_C1, smooth_n=smooth_n + ) # Get vote confusions - file = join(path, 'vote_confs.txt') + file = join(path, "vote_confs.txt") if exists(file): vote_C2 = load_confusions(file, n_class) - vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = smooth_metrics(vote_C2, smooth_n=2) + vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = smooth_metrics( + vote_C2, smooth_n=2 + ) else: vote_C2 = val_C1 - vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = (val_PRE, val_REC, val_F1, val_IoU, val_ACC) + vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = ( + val_PRE, + val_REC, + val_F1, + val_IoU, + val_ACC, + ) # Aggregate results - all_pred_epochs += [np.array([i+first_e for i in range(len(val_ACC))])] + all_pred_epochs += [np.array([i + first_e for i in range(len(val_ACC))])] all_val_OA += [val_ACC] all_vote_OA += [vote_ACC] all_vote_confs += [vote_C2] @@ -521,12 +530,15 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None): # *********** for i, label in enumerate(list_of_labels): - - print('\n' + label + '\n' + '*' * len(label) + '\n') + print("\n" + label + "\n" + "*" * len(label) + "\n") print(list_of_paths[i]) best_epoch = np.argmax(all_vote_OA[i]) - print('Best 
Accuracy : {:.1f} % (epoch {:d})'.format(100 * all_vote_OA[i][best_epoch], best_epoch)) + print( + "Best Accuracy : {:.1f} % (epoch {:d})".format( + 100 * all_vote_OA[i][best_epoch], best_epoch + ) + ) confs = all_vote_confs[i] @@ -544,32 +556,31 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None): diags = np.diagonal(class_avg_confs, axis1=-2, axis2=-1) class_avg_ACC = np.sum(diags, axis=-1) / np.sum(class_avg_confs, axis=(-1, -2)) - print('Corresponding mAcc : {:.1f} %'.format(100 * class_avg_ACC[best_epoch])) + print("Corresponding mAcc : {:.1f} %".format(100 * class_avg_ACC[best_epoch])) # Plots # ***** - for fig_name, OA in zip(['Validation', 'Vote'], [all_val_OA, all_vote_OA]): - + for fig_name, OA in zip(["Validation", "Vote"], [all_val_OA, all_vote_OA]): # Figure fig = plt.figure(fig_name) for i, label in enumerate(list_of_labels): plt.plot(all_pred_epochs[i], OA[i], linewidth=1, label=label) - plt.xlabel('epochs') - plt.ylabel(fig_name + ' Accuracy') + plt.xlabel("epochs") + plt.ylabel(fig_name + " Accuracy") # Set limits for y axis - #plt.ylim(0.55, 0.95) + # plt.ylim(0.55, 0.95) # Display legends and title plt.legend(loc=4) # Customize the graph ax = fig.gca() - ax.grid(linestyle='-.', which='both') - #ax.set_yticks(np.arange(0.8, 1.02, 0.02)) + ax.grid(linestyle="-.", which="both") + # ax.set_yticks(np.arange(0.8, 1.02, 0.02)) - #for i, label in enumerate(list_of_labels): + # for i, label in enumerate(list_of_labels): # print(label, np.max(all_train_OA[i]), np.max(all_val_OA[i])) # Show all @@ -577,7 +588,6 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None): def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None): - # Parameters # ********** @@ -599,23 +609,25 @@ def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None): config = Config() config.load(list_of_paths[0]) - class_list = [dataset.label_to_names[label] for label in dataset.label_values - if label not in dataset.ignored_labels] + class_list = [ + dataset.label_to_names[label] + for label in dataset.label_values + if label not in dataset.ignored_labels + ] - s = '{:^6}|'.format('mean') + s = "{:^6}|".format("mean") for c in class_list: - s += '{:^6}'.format(c[:4]) + s += "{:^6}".format(c[:4]) print(s) - print(6*'-' + '|' + 6*config.num_classes*'-') + print(6 * "-" + "|" + 6 * config.num_classes * "-") for path in list_of_paths: - # Get validation IoUs nc_model = dataset.num_classes - len(dataset.ignored_labels) - file = join(path, 'val_IoUs.txt') + file = join(path, "val_IoUs.txt") val_IoUs = load_single_IoU(file, nc_model) # Get Subpart IoUs - file = join(path, 'subpart_IoUs.txt') + file = join(path, "subpart_IoUs.txt") subpart_IoUs = load_single_IoU(file, nc_model) # Get mean IoU @@ -629,69 +641,75 @@ def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None): all_subpart_mIoUs += [subpart_mIoUs] all_subpart_class_IoUs += [subpart_class_IoUs] - s = '{:^6.1f}|'.format(100*subpart_mIoUs[-1]) + s = "{:^6.1f}|".format(100 * subpart_mIoUs[-1]) for IoU in subpart_class_IoUs[-1]: - s += '{:^6.1f}'.format(100*IoU) + s += "{:^6.1f}".format(100 * IoU) print(s) - print(6*'-' + '|' + 6*config.num_classes*'-') + print(6 * "-" + "|" + 6 * config.num_classes * "-") for snap_IoUs in all_val_class_IoUs: if len(snap_IoUs) > 0: - s = '{:^6.1f}|'.format(100*np.mean(snap_IoUs[-1])) + s = "{:^6.1f}|".format(100 * np.mean(snap_IoUs[-1])) for IoU in snap_IoUs[-1]: - s += '{:^6.1f}'.format(100*IoU) + s += "{:^6.1f}".format(100 * IoU) else: - s = 
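# Aside (illustrative sketch, not part of the patch): the block above reports the
# best overall accuracy (OA) and a class-averaged accuracy (mAcc) from the vote
# confusions. A compact formulation of both from a raw confusion matrix; the
# script derives mAcc from per-class-normalised confusions, which amounts to the
# same average of per-class recalls. Example numbers are made up.
import numpy as np


def overall_and_mean_class_accuracy(C):
    """OA = trace / total;  mAcc = mean of per-class recalls (diagonal / row sums)."""
    C = np.asarray(C, dtype=np.float64)
    oa = np.trace(C) / C.sum()
    macc = np.mean(np.diag(C) / np.maximum(C.sum(axis=1), 1e-9))
    return oa, macc


C = np.array([[90, 10],
              [30, 70]])
print(overall_and_mean_class_accuracy(C))  # approx. (0.8, 0.8)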
'{:^6s}'.format('-') + s = "{:^6s}".format("-") for _ in range(config.num_classes): - s += '{:^6s}'.format('-') + s += "{:^6s}".format("-") print(s) # Plots # ***** # Figure - fig = plt.figure('mIoUs') + fig = plt.figure("mIoUs") for i, name in enumerate(list_of_names): - p = plt.plot(all_pred_epochs[i], all_subpart_mIoUs[i], '--', linewidth=1, label=name) - plt.plot(all_pred_epochs[i], all_val_mIoUs[i], linewidth=1, color=p[-1].get_color()) - plt.xlabel('epochs') - plt.ylabel('IoU') + p = plt.plot( + all_pred_epochs[i], all_subpart_mIoUs[i], "--", linewidth=1, label=name + ) + plt.plot( + all_pred_epochs[i], all_val_mIoUs[i], linewidth=1, color=p[-1].get_color() + ) + plt.xlabel("epochs") + plt.ylabel("IoU") # Set limits for y axis - #plt.ylim(0.55, 0.95) + # plt.ylim(0.55, 0.95) # Display legends and title plt.legend(loc=4) # Customize the graph ax = fig.gca() - ax.grid(linestyle='-.', which='both') - #ax.set_yticks(np.arange(0.8, 1.02, 0.02)) + ax.grid(linestyle="-.", which="both") + # ax.set_yticks(np.arange(0.8, 1.02, 0.02)) displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7] - #displayed_classes = [] + # displayed_classes = [] for c_i, c_name in enumerate(class_list): if c_i in displayed_classes: - # Figure - fig = plt.figure(c_name + ' IoU') + fig = plt.figure(c_name + " IoU") for i, name in enumerate(list_of_names): - plt.plot(all_pred_epochs[i], all_val_class_IoUs[i][:, c_i], linewidth=1, label=name) - plt.xlabel('epochs') - plt.ylabel('IoU') + plt.plot( + all_pred_epochs[i], + all_val_class_IoUs[i][:, c_i], + linewidth=1, + label=name, + ) + plt.xlabel("epochs") + plt.ylabel("IoU") # Set limits for y axis - #plt.ylim(0.8, 1) + # plt.ylim(0.8, 1) # Display legends and title plt.legend(loc=4) # Customize the graph ax = fig.gca() - ax.grid(linestyle='-.', which='both') - #ax.set_yticks(np.arange(0.8, 1.02, 0.02)) - - + ax.grid(linestyle="-.", which="both") + # ax.set_yticks(np.arange(0.8, 1.02, 0.02)) # Show all plt.show() @@ -713,23 +731,22 @@ def experiment_name_1(): """ # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset. - start = 'Log_2020-04-22_11-52-58' - end = 'Log_2023-07-29_12-40-27' + start = "Log_2020-04-22_11-52-58" + end = "Log_2023-07-29_12-40-27" # Name of the result path - res_path = 'results' + res_path = "results" # Gather logs and sort by date - logs = np.sort([join(res_path, l) for l in listdir_str(res_path) if start <= l <= end]) + logs = np.sort( + [join(res_path, l) for l in listdir_str(res_path) if start <= l <= end] + ) # Give names to the logs (for plot legends) - logs_names = ['name_log_1', - 'name_log_2', - 'name_log_3', - 'name_log_4'] + logs_names = ["name_log_1", "name_log_2", "name_log_3", "name_log_4"] # safe check log names - logs_names = np.array(logs_names[:len(logs)]) + logs_names = np.array(logs_names[: len(logs)]) return logs, logs_names @@ -743,27 +760,26 @@ def experiment_name_2(): """ # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset. 
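# Aside (illustrative sketch, not part of the patch): experiment_name_1/2 select
# logs by comparing folder names such as 'Log_YYYY-MM-DD_HH-MM-SS' as strings.
# Because the date format is fixed-width and zero-padded, lexicographic order is
# chronological order, so `start <= l <= end` acts as a date-range filter. The
# folder names below are hypothetical.
import numpy as np

all_logs = ["Log_2020-04-22_11-52-58",
            "Log_2020-05-01_09-00-00",
            "Log_2020-06-15_18-30-12",
            "Log_2021-01-03_08-15-00"]
start, end = "Log_2020-04-22_11-52-58", "Log_2020-05-22_11-52-58"
selected = np.sort([l for l in all_logs if start <= l <= end])
print(selected)  # only the two logs dated inside the [start, end] window, in date order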
- start = 'Log_2020-04-22_11-52-58' - end = 'Log_2020-05-22_11-52-58' + start = "Log_2020-04-22_11-52-58" + end = "Log_2020-05-22_11-52-58" # Name of the result path - res_path = 'results' + res_path = "results" # Gather logs and sort by date - logs = np.sort([join(res_path, l) for l in listdir_str(res_path) if start <= l <= end]) + logs = np.sort( + [join(res_path, l) for l in listdir_str(res_path) if start <= l <= end] + ) # Optionally add a specific log at a specific place in the log list - logs = logs.astype(' 'last_XXX': Automatically retrieve the last trained model on dataset XXX # > '(old_)results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model - chosen_log = 'results/Light_KPFCNN' + chosen_log = "results/Light_KPFCNN" # Choose the index of the checkpoint to load OR None if you want to load the current checkpoint chkp_idx = -1 @@ -111,25 +112,25 @@ if __name__ == '__main__': ############################ # Set which gpu is going to be used - GPU_ID = '0' + GPU_ID = "0" # Set GPU visible device - os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID + os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID ############### # Previous chkp ############### # Find all checkpoints in the chosen training folder - chkp_path = os.path.join(chosen_log, 'checkpoints') - chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] + chkp_path = os.path.join(chosen_log, "checkpoints") + chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"] # Find which snapshot to restore if chkp_idx is None: - chosen_chkp = 'current_chkp.tar' + chosen_chkp = "current_chkp.tar" else: chosen_chkp = np.sort(chkps)[chkp_idx] - chosen_chkp = os.path.join(chosen_log, 'checkpoints', chosen_chkp) + chosen_chkp = os.path.join(chosen_log, "checkpoints", chosen_chkp) # Initialize configuration class config = Config() @@ -141,10 +142,10 @@ if __name__ == '__main__': # Change parameters for the test here. For example, you can stop augmenting the input data. 
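# Aside (illustrative sketch, not part of the patch): the checkpoint restore logic
# above either falls back to the rolling 'current_chkp.tar' (chkp_idx is None) or
# indexes into the lexicographically sorted snapshot list, so chkp_idx = -1 picks
# the latest numbered snapshot. File names below are hypothetical.
import numpy as np


def pick_checkpoint(chkps, chkp_idx):
    """Mirror of the selection above: None -> rolling checkpoint, else sorted index."""
    if chkp_idx is None:
        return "current_chkp.tar"
    return np.sort(chkps)[chkp_idx]


chkps = ["chkp_0050.tar", "chkp_0150.tar", "chkp_0100.tar"]
print(pick_checkpoint(chkps, None))  # current_chkp.tar
print(pick_checkpoint(chkps, -1))    # chkp_0150.tar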
- #config.augment_noise = 0.0001 - #config.augment_symmetries = False - #config.batch_num = 3 - #config.in_radius = 4 + # config.augment_noise = 0.0001 + # config.augment_symmetries = False + # config.batch_num = 3 + # config.in_radius = 4 config.validation_size = 200 config.input_threads = 10 @@ -153,67 +154,69 @@ if __name__ == '__main__': ############## print() - print('Data Preparation') - print('****************') + print("Data Preparation") + print("****************") print(config.dataset) if on_val: - set = 'validation' + set = "validation" else: - set = 'test' + set = "test" # Initiate dataset - if config.dataset == 'ModelNet40': + if config.dataset == "ModelNet40": test_dataset = ModelNet40Dataset(config, train=False) test_sampler = ModelNet40Sampler(test_dataset) collate_fn = ModelNet40Collate - elif config.dataset == 'S3DIS': - test_dataset = S3DISDataset(config, set='validation', use_potentials=True) + elif config.dataset == "S3DIS": + test_dataset = S3DISDataset(config, set="validation", use_potentials=True) test_sampler = S3DISSampler(test_dataset) collate_fn = S3DISCollate - elif config.dataset == 'SemanticKitti': + elif config.dataset == "SemanticKitti": test_dataset = SemanticKittiDataset(config, set=set, balance_classes=False) test_sampler = SemanticKittiSampler(test_dataset) collate_fn = SemanticKittiCollate else: - raise ValueError('Unsupported dataset : ' + config.dataset) + raise ValueError("Unsupported dataset : " + config.dataset) # Data loader - test_loader = DataLoader(test_dataset, - batch_size=1, - sampler=test_sampler, - collate_fn=collate_fn, - num_workers=config.input_threads, - pin_memory=True) + test_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=test_sampler, + collate_fn=collate_fn, + num_workers=config.input_threads, + pin_memory=True, + ) # Calibrate samplers test_sampler.calibration(test_loader, verbose=True) - print('\nModel Preparation') - print('*****************') + print("\nModel Preparation") + print("*****************") # Define network model t1 = time.time() - if config.dataset_task == 'classification': + if config.dataset_task == "classification": net = KPCNN(config) - elif config.dataset_task in ['cloud_segmentation', 'slam_segmentation']: + elif config.dataset_task in ["cloud_segmentation", "slam_segmentation"]: net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels) else: - raise ValueError('Unsupported dataset_task for testing: ' + config.dataset_task) + raise ValueError("Unsupported dataset_task for testing: " + config.dataset_task) # Define a visualizer class tester = ModelTester(net, chkp_path=chosen_chkp) - print('Done in {:.1f}s\n'.format(time.time() - t1)) + print("Done in {:.1f}s\n".format(time.time() - t1)) - print('\nStart test') - print('**********\n') + print("\nStart test") + print("**********\n") # Training - if config.dataset_task == 'classification': + if config.dataset_task == "classification": tester.classification_test(net, test_loader, config) - elif config.dataset_task == 'cloud_segmentation': + elif config.dataset_task == "cloud_segmentation": tester.cloud_segmentation_test(net, test_loader, config) - elif config.dataset_task == 'slam_segmentation': + elif config.dataset_task == "slam_segmentation": tester.slam_segmentation_test(net, test_loader, config) else: - raise ValueError('Unsupported dataset_task for testing: ' + config.dataset_task) \ No newline at end of file + raise ValueError("Unsupported dataset_task for testing: " + config.dataset_task) diff --git a/train_ModelNet40.py 
b/train_ModelNet40.py index a00aad7..f6f3c21 100644 --- a/train_ModelNet40.py +++ b/train_ModelNet40.py @@ -26,7 +26,6 @@ import signal import os import numpy as np import sys -import torch # Dataset from datasetss.ModelNet40 import * @@ -43,6 +42,7 @@ from models.architectures import KPCNN # \******************/ # + class Modelnet40Config(Config): """ Override the parameters you want to modify for this dataset @@ -53,13 +53,13 @@ class Modelnet40Config(Config): #################### # Dataset name - dataset = 'ModelNet40' + dataset = "ModelNet40" # Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset). num_classes = None # Type of task performed on this dataset (also overwritten) - dataset_task = '' + dataset_task = "" # Number of CPU threads for the input pipeline input_threads = 10 @@ -69,21 +69,23 @@ class Modelnet40Config(Config): ######################### # Define layers - architecture = ['simple', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'global_average'] + architecture = [ + "simple", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "global_average", + ] ################### # KPConv parameters @@ -105,10 +107,10 @@ class Modelnet40Config(Config): KP_extent = 1.2 # Behavior of convolutions in ('constant', 'linear', 'gaussian') - KP_influence = 'linear' + KP_influence = "linear" # Aggregation function of KPConv in ('closest', 'sum') - aggregation_mode = 'sum' + aggregation_mode = "sum" # Choice of input features in_features_dim = 1 @@ -123,10 +125,10 @@ class Modelnet40Config(Config): # Deformable offset loss # 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) - deform_fitting_mode = 'point2point' - deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss - deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations - repulse_extent = 1.2 # Distance of repulsion for deformed kernel points + deform_fitting_mode = "point2point" + deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss + deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations + repulse_extent = 1.2 # Distance of repulsion for deformed kernel points ##################### # Training parameters @@ -138,7 +140,7 @@ class Modelnet40Config(Config): # Learning rate management learning_rate = 1e-2 momentum = 0.98 - lr_decays = {i: 0.1**(1/100) for i in range(1, max_epoch)} + lr_decays = {i: 0.1 ** (1 / 100) for i in range(1, max_epoch)} grad_clip_norm = 100.0 # Number of batch @@ -156,7 +158,7 @@ class Modelnet40Config(Config): # Augmentations augment_scale_anisotropic = True augment_symmetries = [True, True, True] - augment_rotation = 'none' + augment_rotation = "none" augment_scale_min = 0.8 augment_scale_max = 1.2 augment_noise = 0.001 @@ -166,7 +168,7 @@ class Modelnet40Config(Config): # > 'none': Each point in the whole batch has the same contribution. 
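# Aside (illustrative sketch, not part of the patch): the schedule
# lr_decays = {i: 0.1 ** (1 / 100) for i in range(1, max_epoch)} multiplies the
# learning rate by 0.1 ** (1/100) every epoch, i.e. it spreads one decade of
# decay over 100 epochs. Quick check with the config's initial lr of 1e-2:
lr, decay = 1e-2, 0.1 ** (1 / 100)
for epoch in range(1, 101):
    lr *= decay
print(round(lr, 6))  # 0.001 -> the rate is divided by 10 after 100 epochs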
# > 'class': Each class has the same contribution (points are weighted according to class balance) # > 'batch': Each cloud in the batch has the same contribution (points are weighted according cloud sizes) - segloss_balance = 'none' + segloss_balance = "none" # Do we nee to save convergence saving = True @@ -179,40 +181,40 @@ class Modelnet40Config(Config): # \***************/ # -if __name__ == '__main__': - +if __name__ == "__main__": ############################ # Initialize the environment ############################ # Set which gpu is going to be used - GPU_ID = '0' + GPU_ID = "0" # Set GPU visible device - os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID + os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID ############### # Previous chkp ############### # Choose here if you want to start training from a previous snapshot (None for new training) - #previous_training_path = 'Log_2020-03-19_19-53-27' - previous_training_path = '' + # previous_training_path = 'Log_2020-03-19_19-53-27' + previous_training_path = "" # Choose index of checkpoint to start from. If None, uses the latest chkp chkp_idx = None if previous_training_path: - # Find all snapshot in the chosen training folder - chkp_path = os.path.join('results', previous_training_path, 'checkpoints') - chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] + chkp_path = os.path.join("results", previous_training_path, "checkpoints") + chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"] # Find which snapshot to restore if chkp_idx is None: - chosen_chkp = 'current_chkp.tar' + chosen_chkp = "current_chkp.tar" else: chosen_chkp = np.sort(chkps)[chkp_idx] - chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp) + chosen_chkp = os.path.join( + "results", previous_training_path, "checkpoints", chosen_chkp + ) else: chosen_chkp = None @@ -222,13 +224,13 @@ if __name__ == '__main__': ############## print() - print('Data Preparation') - print('****************') + print("Data Preparation") + print("****************") # Initialize configuration class config = Modelnet40Config() if previous_training_path: - config.load(os.path.join('results', previous_training_path)) + config.load(os.path.join("results", previous_training_path)) config.saving_path = None # Get path from argument if given @@ -244,28 +246,32 @@ if __name__ == '__main__': test_sampler = ModelNet40Sampler(test_dataset, balance_labels=True) # Initialize the dataloader - training_loader = DataLoader(training_dataset, - batch_size=1, - sampler=training_sampler, - collate_fn=ModelNet40Collate, - num_workers=config.input_threads, - pin_memory=True) - test_loader = DataLoader(test_dataset, - batch_size=1, - sampler=test_sampler, - collate_fn=ModelNet40Collate, - num_workers=config.input_threads, - pin_memory=True) + training_loader = DataLoader( + training_dataset, + batch_size=1, + sampler=training_sampler, + collate_fn=ModelNet40Collate, + num_workers=config.input_threads, + pin_memory=True, + ) + test_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=test_sampler, + collate_fn=ModelNet40Collate, + num_workers=config.input_threads, + pin_memory=True, + ) # Calibrate samplers training_sampler.calibration(training_loader) test_sampler.calibration(test_loader) - #debug_timing(test_dataset, test_sampler, test_loader) - #debug_show_clouds(training_dataset, training_sampler, training_loader) + # debug_timing(test_dataset, test_sampler, test_loader) + # debug_show_clouds(training_dataset, training_sampler, training_loader) - print('\nModel 
Preparation') - print('*****************') + print("\nModel Preparation") + print("*****************") # Define network model t1 = time.time() @@ -273,20 +279,17 @@ if __name__ == '__main__': # Define a trainer class trainer = ModelTrainer(net, config, chkp_path=chosen_chkp) - print('Done in {:.1f}s\n'.format(time.time() - t1)) + print("Done in {:.1f}s\n".format(time.time() - t1)) - print('\nStart training') - print('**************') + print("\nStart training") + print("**************") # Training try: trainer.train(net, training_loader, test_loader, config) except: - print('Caught an error') + print("Caught an error") os.kill(os.getpid(), signal.SIGINT) - print('Forcing exit now') + print("Forcing exit now") os.kill(os.getpid(), signal.SIGINT) - - - diff --git a/train_NPM3D.py b/train_NPM3D.py index d083a37..42ec580 100644 --- a/train_NPM3D.py +++ b/train_NPM3D.py @@ -40,6 +40,7 @@ from models.architectures import KPFCNN # \******************/ # + class NPM3DConfig(Config): """ Override the parameters you want to modify for this dataset @@ -50,13 +51,13 @@ class NPM3DConfig(Config): #################### # Dataset name - dataset = 'NPM3D' + dataset = "NPM3D" # Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset). num_classes = None # Type of task performed on this dataset (also overwritten) - dataset_task = '' + dataset_task = "" # Number of CPU threads for the input pipeline input_threads = 10 @@ -66,28 +67,30 @@ class NPM3DConfig(Config): ######################### # # Define layers - architecture = ['simple', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary'] + architecture = [ + "simple", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + ] ################### # KPConv parameters @@ -112,10 +115,10 @@ class NPM3DConfig(Config): KP_extent = 1.2 # Behavior of convolutions in ('constant', 'linear', 'gaussian') - KP_influence = 'linear' + KP_influence = "linear" # Aggregation function of KPConv in ('closest', 'sum') - aggregation_mode = 'sum' + aggregation_mode = "sum" # Choice of input features first_features_dim = 128 @@ -131,10 +134,10 @@ class NPM3DConfig(Config): # Deformable offset loss # 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) - deform_fitting_mode = 'point2point' - deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss - deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations - repulse_extent = 1.2 # Distance of repulsion for deformed kernel points + deform_fitting_mode = "point2point" + deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss + deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations + repulse_extent = 1.2 # Distance of repulsion for deformed kernel points ##################### # Training parameters @@ -164,7 +167,7 @@ 
class NPM3DConfig(Config): # Augmentations augment_scale_anisotropic = True augment_symmetries = [True, False, False] - augment_rotation = 'vertical' + augment_rotation = "vertical" augment_scale_min = 0.9 augment_scale_max = 1.1 augment_noise = 0.001 @@ -174,7 +177,7 @@ class NPM3DConfig(Config): # > 'none': Each point in the whole batch has the same contribution. # > 'class': Each class has the same contribution (points are weighted according to class balance) # > 'batch': Each cloud in the batch has the same contribution (points are weighted according cloud sizes) - segloss_balance = 'none' + segloss_balance = "none" # Do we nee to save convergence saving = True @@ -187,17 +190,16 @@ class NPM3DConfig(Config): # \***************/ # -if __name__ == '__main__': - +if __name__ == "__main__": ############################ # Initialize the environment ############################ # Set which gpu is going to be used - GPU_ID = '0' + GPU_ID = "0" # Set GPU visible device - os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID + os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID ############### # Previous chkp @@ -205,22 +207,23 @@ if __name__ == '__main__': # Choose here if you want to start training from a previous snapshot (None for new training) # previous_training_path = 'Log_2020-03-19_19-53-27' - previous_training_path = '' + previous_training_path = "" # Choose index of checkpoint to start from. If None, uses the latest chkp chkp_idx = None if previous_training_path: - # Find all snapshot in the chosen training folder - chkp_path = os.path.join('results', previous_training_path, 'checkpoints') - chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] + chkp_path = os.path.join("results", previous_training_path, "checkpoints") + chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"] # Find which snapshot to restore if chkp_idx is None: - chosen_chkp = 'current_chkp.tar' + chosen_chkp = "current_chkp.tar" else: chosen_chkp = np.sort(chkps)[chkp_idx] - chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp) + chosen_chkp = os.path.join( + "results", previous_training_path, "checkpoints", chosen_chkp + ) else: chosen_chkp = None @@ -230,13 +233,13 @@ if __name__ == '__main__': ############## print() - print('Data Preparation') - print('****************') + print("Data Preparation") + print("****************") # Initialize configuration class config = NPM3DConfig() if previous_training_path: - config.load(os.path.join('results', previous_training_path)) + config.load(os.path.join("results", previous_training_path)) config.saving_path = None # Get path from argument if given @@ -244,26 +247,30 @@ if __name__ == '__main__': config.saving_path = sys.argv[1] # Initialize datasets - training_dataset = NPM3DDataset(config, set='training', use_potentials=True) - test_dataset = NPM3DDataset(config, set='validation', use_potentials=True) + training_dataset = NPM3DDataset(config, set="training", use_potentials=True) + test_dataset = NPM3DDataset(config, set="validation", use_potentials=True) # Initialize samplers training_sampler = NPM3DSampler(training_dataset) test_sampler = NPM3DSampler(test_dataset) # Initialize the dataloader - training_loader = DataLoader(training_dataset, - batch_size=1, - sampler=training_sampler, - collate_fn=NPM3DCollate, - num_workers=config.input_threads, - pin_memory=True) - test_loader = DataLoader(test_dataset, - batch_size=1, - sampler=test_sampler, - collate_fn=NPM3DCollate, - num_workers=config.input_threads, - pin_memory=True) + 
training_loader = DataLoader( + training_dataset, + batch_size=1, + sampler=training_sampler, + collate_fn=NPM3DCollate, + num_workers=config.input_threads, + pin_memory=True, + ) + test_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=test_sampler, + collate_fn=NPM3DCollate, + num_workers=config.input_threads, + pin_memory=True, + ) # Calibrate samplers training_sampler.calibration(training_loader, verbose=True) @@ -274,8 +281,8 @@ if __name__ == '__main__': # debug_timing(test_dataset, test_loader) # debug_upsampling(training_dataset, training_loader) - print('\nModel Preparation') - print('*****************') + print("\nModel Preparation") + print("*****************") # Define network model t1 = time.time() @@ -283,25 +290,28 @@ if __name__ == '__main__': debug = False if debug: - print('\n*************************************\n') + print("\n*************************************\n") print(net) - print('\n*************************************\n') + print("\n*************************************\n") for param in net.parameters(): if param.requires_grad: print(param.shape) - print('\n*************************************\n') - print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad)) - print('\n*************************************\n') + print("\n*************************************\n") + print( + "Model size %i" + % sum(param.numel() for param in net.parameters() if param.requires_grad) + ) + print("\n*************************************\n") # Define a trainer class trainer = ModelTrainer(net, config, chkp_path=chosen_chkp) - print('Done in {:.1f}s\n'.format(time.time() - t1)) + print("Done in {:.1f}s\n".format(time.time() - t1)) - print('\nStart training') - print('**************') + print("\nStart training") + print("**************") # Training trainer.train(net, training_loader, test_loader, config) - print('Forcing exit now') + print("Forcing exit now") os.kill(os.getpid(), signal.SIGINT) diff --git a/train_S3DIS.py b/train_S3DIS.py index 8cb54bb..9a0c17b 100644 --- a/train_S3DIS.py +++ b/train_S3DIS.py @@ -40,6 +40,7 @@ from models.architectures import KPFCNN # \******************/ # + class S3DISConfig(Config): """ Override the parameters you want to modify for this dataset @@ -50,13 +51,13 @@ class S3DISConfig(Config): #################### # Dataset name - dataset = 'S3DIS' + dataset = "S3DIS" # Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset). 
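# Aside (illustrative sketch, not part of the patch): the loaders above pair a
# custom sampler with a custom collate_fn because point clouds have different
# numbers of points and cannot be stacked into a regular tensor. A toy version of
# that idea; the repo instead keeps batch_size=1 and lets its sampler/collate
# build the full batch structure, and everything named here is hypothetical.
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset


class ToyClouds(Dataset):
    def __init__(self):
        self.clouds = [np.random.rand(n, 3).astype(np.float32) for n in (100, 250, 80)]

    def __len__(self):
        return len(self.clouds)

    def __getitem__(self, i):
        return self.clouds[i]


def concat_collate(batch):
    # Concatenate variable-size clouds and remember their lengths.
    points = torch.from_numpy(np.concatenate(batch, axis=0))
    lengths = torch.tensor([len(c) for c in batch])
    return points, lengths


points, lengths = next(iter(DataLoader(ToyClouds(), batch_size=3, collate_fn=concat_collate)))
print(points.shape, lengths.tolist())  # torch.Size([430, 3]) [100, 250, 80]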
num_classes = None # Type of task performed on this dataset (also overwritten) - dataset_task = '' + dataset_task = "" # Number of CPU threads for the input pipeline input_threads = 10 @@ -66,28 +67,30 @@ class S3DISConfig(Config): ######################### # # Define layers - architecture = ['simple', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb_deformable', - 'resnetb_deformable', - 'resnetb_deformable_strided', - 'resnetb_deformable', - 'resnetb_deformable', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary'] + architecture = [ + "simple", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb_deformable", + "resnetb_deformable", + "resnetb_deformable_strided", + "resnetb_deformable", + "resnetb_deformable", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + ] # Define layers # architecture = ['simple', @@ -136,10 +139,10 @@ class S3DISConfig(Config): KP_extent = 1.2 # Behavior of convolutions in ('constant', 'linear', 'gaussian') - KP_influence = 'linear' + KP_influence = "linear" # Aggregation function of KPConv in ('closest', 'sum') - aggregation_mode = 'sum' + aggregation_mode = "sum" # Choice of input features first_features_dim = 128 @@ -155,10 +158,10 @@ class S3DISConfig(Config): # Deformable offset loss # 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) - deform_fitting_mode = 'point2point' - deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss - deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations - repulse_extent = 1.2 # Distance of repulsion for deformed kernel points + deform_fitting_mode = "point2point" + deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss + deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations + repulse_extent = 1.2 # Distance of repulsion for deformed kernel points ##################### # Training parameters @@ -188,7 +191,7 @@ class S3DISConfig(Config): # Augmentations augment_scale_anisotropic = True augment_symmetries = [True, False, False] - augment_rotation = 'vertical' + augment_rotation = "vertical" augment_scale_min = 0.9 augment_scale_max = 1.1 augment_noise = 0.001 @@ -198,7 +201,7 @@ class S3DISConfig(Config): # > 'none': Each point in the whole batch has the same contribution. 
# > 'class': Each class has the same contribution (points are weighted according to class balance) # > 'batch': Each cloud in the batch has the same contribution (points are weighted according cloud sizes) - segloss_balance = 'none' + segloss_balance = "none" # Do we nee to save convergence saving = True @@ -211,17 +214,16 @@ class S3DISConfig(Config): # \***************/ # -if __name__ == '__main__': - +if __name__ == "__main__": ############################ # Initialize the environment ############################ # Set which gpu is going to be used - GPU_ID = '0' + GPU_ID = "0" # Set GPU visible device - os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID + os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID ############### # Previous chkp @@ -229,22 +231,23 @@ if __name__ == '__main__': # Choose here if you want to start training from a previous snapshot (None for new training) # previous_training_path = 'Log_2020-03-19_19-53-27' - previous_training_path = '' + previous_training_path = "" # Choose index of checkpoint to start from. If None, uses the latest chkp chkp_idx = None if previous_training_path: - # Find all snapshot in the chosen training folder - chkp_path = os.path.join('results', previous_training_path, 'checkpoints') - chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] + chkp_path = os.path.join("results", previous_training_path, "checkpoints") + chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"] # Find which snapshot to restore if chkp_idx is None: - chosen_chkp = 'current_chkp.tar' + chosen_chkp = "current_chkp.tar" else: chosen_chkp = np.sort(chkps)[chkp_idx] - chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp) + chosen_chkp = os.path.join( + "results", previous_training_path, "checkpoints", chosen_chkp + ) else: chosen_chkp = None @@ -254,13 +257,13 @@ if __name__ == '__main__': ############## print() - print('Data Preparation') - print('****************') + print("Data Preparation") + print("****************") # Initialize configuration class config = S3DISConfig() if previous_training_path: - config.load(os.path.join('results', previous_training_path)) + config.load(os.path.join("results", previous_training_path)) config.saving_path = None # Get path from argument if given @@ -268,26 +271,30 @@ if __name__ == '__main__': config.saving_path = sys.argv[1] # Initialize datasets - training_dataset = S3DISDataset(config, set='training', use_potentials=True) - test_dataset = S3DISDataset(config, set='validation', use_potentials=True) + training_dataset = S3DISDataset(config, set="training", use_potentials=True) + test_dataset = S3DISDataset(config, set="validation", use_potentials=True) # Initialize samplers training_sampler = S3DISSampler(training_dataset) test_sampler = S3DISSampler(test_dataset) # Initialize the dataloader - training_loader = DataLoader(training_dataset, - batch_size=1, - sampler=training_sampler, - collate_fn=S3DISCollate, - num_workers=config.input_threads, - pin_memory=True) - test_loader = DataLoader(test_dataset, - batch_size=1, - sampler=test_sampler, - collate_fn=S3DISCollate, - num_workers=config.input_threads, - pin_memory=True) + training_loader = DataLoader( + training_dataset, + batch_size=1, + sampler=training_sampler, + collate_fn=S3DISCollate, + num_workers=config.input_threads, + pin_memory=True, + ) + test_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=test_sampler, + collate_fn=S3DISCollate, + num_workers=config.input_threads, + pin_memory=True, + ) # Calibrate samplers 
training_sampler.calibration(training_loader, verbose=True) @@ -298,8 +305,8 @@ if __name__ == '__main__': # debug_timing(test_dataset, test_loader) # debug_upsampling(training_dataset, training_loader) - print('\nModel Preparation') - print('*****************') + print("\nModel Preparation") + print("*****************") # Define network model t1 = time.time() @@ -307,25 +314,28 @@ if __name__ == '__main__': debug = False if debug: - print('\n*************************************\n') + print("\n*************************************\n") print(net) - print('\n*************************************\n') + print("\n*************************************\n") for param in net.parameters(): if param.requires_grad: print(param.shape) - print('\n*************************************\n') - print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad)) - print('\n*************************************\n') + print("\n*************************************\n") + print( + "Model size %i" + % sum(param.numel() for param in net.parameters() if param.requires_grad) + ) + print("\n*************************************\n") # Define a trainer class trainer = ModelTrainer(net, config, chkp_path=chosen_chkp) - print('Done in {:.1f}s\n'.format(time.time() - t1)) + print("Done in {:.1f}s\n".format(time.time() - t1)) - print('\nStart training') - print('**************') + print("\nStart training") + print("**************") # Training trainer.train(net, training_loader, test_loader, config) - print('Forcing exit now') + print("Forcing exit now") os.kill(os.getpid(), signal.SIGINT) diff --git a/train_SemanticKitti.py b/train_SemanticKitti.py index 7c1dcdb..4a5e9b3 100644 --- a/train_SemanticKitti.py +++ b/train_SemanticKitti.py @@ -26,7 +26,6 @@ import signal import os import numpy as np import sys -import torch # Dataset from datasetss.SemanticKitti import * @@ -43,6 +42,7 @@ from models.architectures import KPFCNN # \******************/ # + class SemanticKittiConfig(Config): """ Override the parameters you want to modify for this dataset @@ -53,13 +53,13 @@ class SemanticKittiConfig(Config): #################### # Dataset name - dataset = 'SemanticKitti' + dataset = "SemanticKitti" # Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset). 
num_classes = None # Type of task performed on this dataset (also overwritten) - dataset_task = '' + dataset_task = "" # Number of CPU threads for the input pipeline input_threads = 10 @@ -69,27 +69,29 @@ class SemanticKittiConfig(Config): ######################### # Define layers - architecture = ['simple', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'resnetb', - 'resnetb_strided', - 'resnetb', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary', - 'nearest_upsample', - 'unary'] + architecture = [ + "simple", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "resnetb", + "resnetb_strided", + "resnetb", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + "nearest_upsample", + "unary", + ] ################### # KPConv parameters @@ -122,10 +124,10 @@ class SemanticKittiConfig(Config): KP_extent = 1.2 # Behavior of convolutions in ('constant', 'linear', 'gaussian') - KP_influence = 'linear' + KP_influence = "linear" # Aggregation function of KPConv in ('closest', 'sum') - aggregation_mode = 'sum' + aggregation_mode = "sum" # Choice of input features first_features_dim = 128 @@ -141,10 +143,10 @@ class SemanticKittiConfig(Config): # Deformable offset loss # 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) - deform_fitting_mode = 'point2point' - deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss - deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations - repulse_extent = 1.2 # Distance of repulsion for deformed kernel points + deform_fitting_mode = "point2point" + deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss + deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations + repulse_extent = 1.2 # Distance of repulsion for deformed kernel points ##################### # Training parameters @@ -171,7 +173,7 @@ class SemanticKittiConfig(Config): # Augmentations augment_scale_anisotropic = True augment_symmetries = [True, False, False] - augment_rotation = 'vertical' + augment_rotation = "vertical" augment_scale_min = 0.8 augment_scale_max = 1.2 augment_noise = 0.001 @@ -202,17 +204,16 @@ class SemanticKittiConfig(Config): # \***************/ # -if __name__ == '__main__': - +if __name__ == "__main__": ############################ # Initialize the environment ############################ # Set which gpu is going to be used - GPU_ID = '0' + GPU_ID = "0" # Set GPU visible device - os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID + os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID ############### # Previous chkp @@ -220,22 +221,23 @@ if __name__ == '__main__': # Choose here if you want to start training from a previous snapshot (None for new training) # previous_training_path = 'Log_2020-03-19_19-53-27' - previous_training_path = '' + previous_training_path = "" # Choose index of checkpoint to start from. 
If None, uses the latest chkp chkp_idx = None if previous_training_path: - # Find all snapshot in the chosen training folder - chkp_path = os.path.join('results', previous_training_path, 'checkpoints') - chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] + chkp_path = os.path.join("results", previous_training_path, "checkpoints") + chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"] # Find which snapshot to restore if chkp_idx is None: - chosen_chkp = 'current_chkp.tar' + chosen_chkp = "current_chkp.tar" else: chosen_chkp = np.sort(chkps)[chkp_idx] - chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp) + chosen_chkp = os.path.join( + "results", previous_training_path, "checkpoints", chosen_chkp + ) else: chosen_chkp = None @@ -245,13 +247,13 @@ if __name__ == '__main__': ############## print() - print('Data Preparation') - print('****************') + print("Data Preparation") + print("****************") # Initialize configuration class config = SemanticKittiConfig() if previous_training_path: - config.load(os.path.join('results', previous_training_path)) + config.load(os.path.join("results", previous_training_path)) config.saving_path = None # Get path from argument if given @@ -259,28 +261,32 @@ if __name__ == '__main__': config.saving_path = sys.argv[1] # Initialize datasets - training_dataset = SemanticKittiDataset(config, set='training', - balance_classes=True) - test_dataset = SemanticKittiDataset(config, set='validation', - balance_classes=False) + training_dataset = SemanticKittiDataset( + config, set="training", balance_classes=True + ) + test_dataset = SemanticKittiDataset(config, set="validation", balance_classes=False) # Initialize samplers training_sampler = SemanticKittiSampler(training_dataset) test_sampler = SemanticKittiSampler(test_dataset) # Initialize the dataloader - training_loader = DataLoader(training_dataset, - batch_size=1, - sampler=training_sampler, - collate_fn=SemanticKittiCollate, - num_workers=config.input_threads, - pin_memory=True) - test_loader = DataLoader(test_dataset, - batch_size=1, - sampler=test_sampler, - collate_fn=SemanticKittiCollate, - num_workers=config.input_threads, - pin_memory=True) + training_loader = DataLoader( + training_dataset, + batch_size=1, + sampler=training_sampler, + collate_fn=SemanticKittiCollate, + num_workers=config.input_threads, + pin_memory=True, + ) + test_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=test_sampler, + collate_fn=SemanticKittiCollate, + num_workers=config.input_threads, + pin_memory=True, + ) # Calibrate max_in_point value training_sampler.calib_max_in(config, training_loader, verbose=True) @@ -294,8 +300,8 @@ if __name__ == '__main__': # debug_timing(test_dataset, test_loader) # debug_class_w(training_dataset, training_loader) - print('\nModel Preparation') - print('*****************') + print("\nModel Preparation") + print("*****************") # Define network model t1 = time.time() @@ -303,25 +309,28 @@ if __name__ == '__main__': debug = False if debug: - print('\n*************************************\n') + print("\n*************************************\n") print(net) - print('\n*************************************\n') + print("\n*************************************\n") for param in net.parameters(): if param.requires_grad: print(param.shape) - print('\n*************************************\n') - print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad)) - 
print('\n*************************************\n') + print("\n*************************************\n") + print( + "Model size %i" + % sum(param.numel() for param in net.parameters() if param.requires_grad) + ) + print("\n*************************************\n") # Define a trainer class trainer = ModelTrainer(net, config, chkp_path=chosen_chkp) - print('Done in {:.1f}s\n'.format(time.time() - t1)) + print("Done in {:.1f}s\n".format(time.time() - t1)) - print('\nStart training') - print('**************') + print("\nStart training") + print("**************") # Training trainer.train(net, training_loader, test_loader, config) - print('Forcing exit now') + print("Forcing exit now") os.kill(os.getpid(), signal.SIGINT) diff --git a/utils/config.py b/utils/config.py index 094774b..02decb3 100644 --- a/utils/config.py +++ b/utils/config.py @@ -21,14 +21,14 @@ import numpy as np # Colors for printing class bcolors: - HEADER = '\033[95m' - OKBLUE = '\033[94m' - OKGREEN = '\033[92m' - WARNING = '\033[93m' - FAIL = '\033[91m' - ENDC = '\033[0m' - BOLD = '\033[1m' - UNDERLINE = '\033[4m' + HEADER = "\033[95m" + OKBLUE = "\033[94m" + OKGREEN = "\033[92m" + WARNING = "\033[93m" + FAIL = "\033[91m" + ENDC = "\033[0m" + BOLD = "\033[1m" + UNDERLINE = "\033[4m" class Config: @@ -41,10 +41,10 @@ class Config: ################## # Dataset name - dataset = '' + dataset = "" # Type of network model - dataset_task = '' + dataset_task = "" # Number of classes in the dataset num_classes = 0 @@ -69,8 +69,8 @@ class Config: architecture = [] # Decide the mode of equivariance and invariance - equivar_mode = '' - invar_mode = '' + equivar_mode = "" + invar_mode = "" # Dimension of the first feature maps first_features_dim = 64 @@ -102,14 +102,14 @@ class Config: KP_extent = 1.0 # Influence function when d < KP_extent. ('constant', 'linear', 'gaussian') When d > KP_extent, always zero - KP_influence = 'linear' + KP_influence = "linear" # Aggregation function of KPConv in ('closest', 'sum') # Decide if you sum all kernel point influences, or if you only take the influence of the closest KP - aggregation_mode = 'sum' + aggregation_mode = "sum" # Fixed points in the kernel : 'none', 'center' or 'verticals' - fixed_kernel_points = 'center' + fixed_kernel_points = "center" # Use modulateion in deformable convolutions modulated = False @@ -141,12 +141,12 @@ class Config: augment_scale_min = 0.9 augment_scale_max = 1.1 augment_symmetries = [False, False, False] - augment_rotation = 'vertical' + augment_rotation = "vertical" augment_noise = 0.005 augment_color = 0.7 # Augment with occlusions (not implemented yet) - augment_occlusion = 'none' + augment_occlusion = "none" augment_occlusion_ratio = 0.2 augment_occlusion_num = 1 @@ -154,7 +154,7 @@ class Config: weight_decay = 1e-3 # The way we balance segmentation loss DEPRECATED - segloss_balance = 'none' + segloss_balance = "none" # Choose weights for class (used in segmentation loss). 
Empty list for no weights class_w = [] @@ -162,10 +162,10 @@ class Config: # Deformable offset loss # 'point2point' fitting geometry by penalizing distance from deform point to input points # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented) - deform_fitting_mode = 'point2point' - deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss - deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations - repulse_extent = 1.0 # Distance of repulsion for deformed kernel points + deform_fitting_mode = "point2point" + deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss + deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations + repulse_extent = 1.0 # Distance of repulsion for deformed kernel points # Number of batch batch_num = 10 @@ -193,7 +193,16 @@ class Config: """ # Number of layers - self.num_layers = len([block for block in self.architecture if 'pool' in block or 'strided' in block]) + 1 + self.num_layers = ( + len( + [ + block + for block in self.architecture + if "pool" in block or "strided" in block + ] + ) + + 1 + ) ################### # Deform layer list @@ -206,9 +215,13 @@ class Config: self.deform_layers = [] arch = self.architecture for block_i, block in enumerate(arch): - # Get all blocks of the layer - if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block): + if not ( + "pool" in block + or "strided" in block + or "global" in block + or "upsample" in block + ): layer_blocks += [block] continue @@ -217,50 +230,51 @@ class Config: deform_layer = False if layer_blocks: - if np.any(['deformable' in blck for blck in layer_blocks]): + if np.any(["deformable" in blck for blck in layer_blocks]): deform_layer = True - if 'pool' in block or 'strided' in block: - if 'deformable' in block: + if "pool" in block or "strided" in block: + if "deformable" in block: deform_layer = True self.deform_layers += [deform_layer] layer_blocks = [] # Stop when meeting a global pooling or upsampling - if 'global' in block or 'upsample' in block: + if "global" in block or "upsample" in block: break def load(self, path): - - filename = join(path, 'parameters.txt') - with open(filename, 'r') as f: + filename = join(path, "parameters.txt") + with open(filename, "r") as f: lines = f.readlines() # Class variable dictionary for line in lines: line_info = line.split() - if len(line_info) > 2 and line_info[0] != '#': - - if line_info[2] == 'None': + if len(line_info) > 2 and line_info[0] != "#": + if line_info[2] == "None": setattr(self, line_info[0], None) - elif line_info[0] == 'lr_decay_epochs': - self.lr_decays = {int(b.split(':')[0]): float(b.split(':')[1]) for b in line_info[2:]} + elif line_info[0] == "lr_decay_epochs": + self.lr_decays = { + int(b.split(":")[0]): float(b.split(":")[1]) + for b in line_info[2:] + } - elif line_info[0] == 'architecture': + elif line_info[0] == "architecture": self.architecture = [b for b in line_info[2:]] - elif line_info[0] == 'augment_symmetries': + elif line_info[0] == "augment_symmetries": self.augment_symmetries = [bool(int(b)) for b in line_info[2:]] - elif line_info[0] == 'num_classes': + elif line_info[0] == "num_classes": if len(line_info) > 3: self.num_classes = [int(c) for c in line_info[2:]] else: self.num_classes = int(line_info[2]) - elif line_info[0] == 'class_w': + elif line_info[0] == "class_w": self.class_w = [float(w) for w in line_info[2:]] elif hasattr(self, line_info[0]): @@ 
-275,108 +289,132 @@ class Config: self.__init__() def save(self): - - with open(join(self.saving_path, 'parameters.txt'), "w") as text_file: - - text_file.write('# -----------------------------------#\n') - text_file.write('# Parameters of the training session #\n') - text_file.write('# -----------------------------------#\n\n') + with open(join(self.saving_path, "parameters.txt"), "w") as text_file: + text_file.write("# -----------------------------------#\n") + text_file.write("# Parameters of the training session #\n") + text_file.write("# -----------------------------------#\n\n") # Input parameters - text_file.write('# Input parameters\n') - text_file.write('# ****************\n\n') - text_file.write('dataset = {:s}\n'.format(self.dataset)) - text_file.write('dataset_task = {:s}\n'.format(self.dataset_task)) + text_file.write("# Input parameters\n") + text_file.write("# ****************\n\n") + text_file.write("dataset = {:s}\n".format(self.dataset)) + text_file.write("dataset_task = {:s}\n".format(self.dataset_task)) if type(self.num_classes) is list: - text_file.write('num_classes =') + text_file.write("num_classes =") for n in self.num_classes: - text_file.write(' {:d}'.format(n)) - text_file.write('\n') + text_file.write(" {:d}".format(n)) + text_file.write("\n") else: - text_file.write('num_classes = {:d}\n'.format(self.num_classes)) - text_file.write('in_points_dim = {:d}\n'.format(self.in_points_dim)) - text_file.write('in_features_dim = {:d}\n'.format(self.in_features_dim)) - text_file.write('in_radius = {:.6f}\n'.format(self.in_radius)) - text_file.write('input_threads = {:d}\n\n'.format(self.input_threads)) + text_file.write("num_classes = {:d}\n".format(self.num_classes)) + text_file.write("in_points_dim = {:d}\n".format(self.in_points_dim)) + text_file.write("in_features_dim = {:d}\n".format(self.in_features_dim)) + text_file.write("in_radius = {:.6f}\n".format(self.in_radius)) + text_file.write("input_threads = {:d}\n\n".format(self.input_threads)) # Model parameters - text_file.write('# Model parameters\n') - text_file.write('# ****************\n\n') + text_file.write("# Model parameters\n") + text_file.write("# ****************\n\n") - text_file.write('architecture =') + text_file.write("architecture =") for a in self.architecture: - text_file.write(' {:s}'.format(a)) - text_file.write('\n') - text_file.write('equivar_mode = {:s}\n'.format(self.equivar_mode)) - text_file.write('invar_mode = {:s}\n'.format(self.invar_mode)) - text_file.write('num_layers = {:d}\n'.format(self.num_layers)) - text_file.write('first_features_dim = {:d}\n'.format(self.first_features_dim)) - text_file.write('use_batch_norm = {:d}\n'.format(int(self.use_batch_norm))) - text_file.write('batch_norm_momentum = {:.6f}\n\n'.format(self.batch_norm_momentum)) - text_file.write('segmentation_ratio = {:.6f}\n\n'.format(self.segmentation_ratio)) + text_file.write(" {:s}".format(a)) + text_file.write("\n") + text_file.write("equivar_mode = {:s}\n".format(self.equivar_mode)) + text_file.write("invar_mode = {:s}\n".format(self.invar_mode)) + text_file.write("num_layers = {:d}\n".format(self.num_layers)) + text_file.write( + "first_features_dim = {:d}\n".format(self.first_features_dim) + ) + text_file.write("use_batch_norm = {:d}\n".format(int(self.use_batch_norm))) + text_file.write( + "batch_norm_momentum = {:.6f}\n\n".format(self.batch_norm_momentum) + ) + text_file.write( + "segmentation_ratio = {:.6f}\n\n".format(self.segmentation_ratio) + ) # KPConv parameters - text_file.write('# KPConv 
parameters\n') - text_file.write('# *****************\n\n') + text_file.write("# KPConv parameters\n") + text_file.write("# *****************\n\n") - text_file.write('first_subsampling_dl = {:.6f}\n'.format(self.first_subsampling_dl)) - text_file.write('num_kernel_points = {:d}\n'.format(self.num_kernel_points)) - text_file.write('conv_radius = {:.6f}\n'.format(self.conv_radius)) - text_file.write('deform_radius = {:.6f}\n'.format(self.deform_radius)) - text_file.write('fixed_kernel_points = {:s}\n'.format(self.fixed_kernel_points)) - text_file.write('KP_extent = {:.6f}\n'.format(self.KP_extent)) - text_file.write('KP_influence = {:s}\n'.format(self.KP_influence)) - text_file.write('aggregation_mode = {:s}\n'.format(self.aggregation_mode)) - text_file.write('modulated = {:d}\n'.format(int(self.modulated))) - text_file.write('n_frames = {:d}\n'.format(self.n_frames)) - text_file.write('max_in_points = {:d}\n\n'.format(self.max_in_points)) - text_file.write('max_val_points = {:d}\n\n'.format(self.max_val_points)) - text_file.write('val_radius = {:.6f}\n\n'.format(self.val_radius)) + text_file.write( + "first_subsampling_dl = {:.6f}\n".format(self.first_subsampling_dl) + ) + text_file.write("num_kernel_points = {:d}\n".format(self.num_kernel_points)) + text_file.write("conv_radius = {:.6f}\n".format(self.conv_radius)) + text_file.write("deform_radius = {:.6f}\n".format(self.deform_radius)) + text_file.write( + "fixed_kernel_points = {:s}\n".format(self.fixed_kernel_points) + ) + text_file.write("KP_extent = {:.6f}\n".format(self.KP_extent)) + text_file.write("KP_influence = {:s}\n".format(self.KP_influence)) + text_file.write("aggregation_mode = {:s}\n".format(self.aggregation_mode)) + text_file.write("modulated = {:d}\n".format(int(self.modulated))) + text_file.write("n_frames = {:d}\n".format(self.n_frames)) + text_file.write("max_in_points = {:d}\n\n".format(self.max_in_points)) + text_file.write("max_val_points = {:d}\n\n".format(self.max_val_points)) + text_file.write("val_radius = {:.6f}\n\n".format(self.val_radius)) # Training parameters - text_file.write('# Training parameters\n') - text_file.write('# *******************\n\n') + text_file.write("# Training parameters\n") + text_file.write("# *******************\n\n") - text_file.write('learning_rate = {:f}\n'.format(self.learning_rate)) - text_file.write('momentum = {:f}\n'.format(self.momentum)) - text_file.write('lr_decay_epochs =') + text_file.write("learning_rate = {:f}\n".format(self.learning_rate)) + text_file.write("momentum = {:f}\n".format(self.momentum)) + text_file.write("lr_decay_epochs =") for e, d in self.lr_decays.items(): - text_file.write(' {:d}:{:f}'.format(e, d)) - text_file.write('\n') - text_file.write('grad_clip_norm = {:f}\n\n'.format(self.grad_clip_norm)) + text_file.write(" {:d}:{:f}".format(e, d)) + text_file.write("\n") + text_file.write("grad_clip_norm = {:f}\n\n".format(self.grad_clip_norm)) - - text_file.write('augment_symmetries =') + text_file.write("augment_symmetries =") for a in self.augment_symmetries: - text_file.write(' {:d}'.format(int(a))) - text_file.write('\n') - text_file.write('augment_rotation = {:s}\n'.format(self.augment_rotation)) - text_file.write('augment_noise = {:f}\n'.format(self.augment_noise)) - text_file.write('augment_occlusion = {:s}\n'.format(self.augment_occlusion)) - text_file.write('augment_occlusion_ratio = {:.6f}\n'.format(self.augment_occlusion_ratio)) - text_file.write('augment_occlusion_num = {:d}\n'.format(self.augment_occlusion_num)) - 
text_file.write('augment_scale_anisotropic = {:d}\n'.format(int(self.augment_scale_anisotropic))) - text_file.write('augment_scale_min = {:.6f}\n'.format(self.augment_scale_min)) - text_file.write('augment_scale_max = {:.6f}\n'.format(self.augment_scale_max)) - text_file.write('augment_color = {:.6f}\n\n'.format(self.augment_color)) + text_file.write(" {:d}".format(int(a))) + text_file.write("\n") + text_file.write("augment_rotation = {:s}\n".format(self.augment_rotation)) + text_file.write("augment_noise = {:f}\n".format(self.augment_noise)) + text_file.write("augment_occlusion = {:s}\n".format(self.augment_occlusion)) + text_file.write( + "augment_occlusion_ratio = {:.6f}\n".format( + self.augment_occlusion_ratio + ) + ) + text_file.write( + "augment_occlusion_num = {:d}\n".format(self.augment_occlusion_num) + ) + text_file.write( + "augment_scale_anisotropic = {:d}\n".format( + int(self.augment_scale_anisotropic) + ) + ) + text_file.write( + "augment_scale_min = {:.6f}\n".format(self.augment_scale_min) + ) + text_file.write( + "augment_scale_max = {:.6f}\n".format(self.augment_scale_max) + ) + text_file.write("augment_color = {:.6f}\n\n".format(self.augment_color)) - text_file.write('weight_decay = {:f}\n'.format(self.weight_decay)) - text_file.write('segloss_balance = {:s}\n'.format(self.segloss_balance)) - text_file.write('class_w =') + text_file.write("weight_decay = {:f}\n".format(self.weight_decay)) + text_file.write("segloss_balance = {:s}\n".format(self.segloss_balance)) + text_file.write("class_w =") for a in self.class_w: - text_file.write(' {:.6f}'.format(a)) - text_file.write('\n') - text_file.write('deform_fitting_mode = {:s}\n'.format(self.deform_fitting_mode)) - text_file.write('deform_fitting_power = {:.6f}\n'.format(self.deform_fitting_power)) - text_file.write('deform_lr_factor = {:.6f}\n'.format(self.deform_lr_factor)) - text_file.write('repulse_extent = {:.6f}\n'.format(self.repulse_extent)) - text_file.write('batch_num = {:d}\n'.format(self.batch_num)) - text_file.write('val_batch_num = {:d}\n'.format(self.val_batch_num)) - text_file.write('max_epoch = {:d}\n'.format(self.max_epoch)) + text_file.write(" {:.6f}".format(a)) + text_file.write("\n") + text_file.write( + "deform_fitting_mode = {:s}\n".format(self.deform_fitting_mode) + ) + text_file.write( + "deform_fitting_power = {:.6f}\n".format(self.deform_fitting_power) + ) + text_file.write("deform_lr_factor = {:.6f}\n".format(self.deform_lr_factor)) + text_file.write("repulse_extent = {:.6f}\n".format(self.repulse_extent)) + text_file.write("batch_num = {:d}\n".format(self.batch_num)) + text_file.write("val_batch_num = {:d}\n".format(self.val_batch_num)) + text_file.write("max_epoch = {:d}\n".format(self.max_epoch)) if self.epoch_steps is None: - text_file.write('epoch_steps = None\n') + text_file.write("epoch_steps = None\n") else: - text_file.write('epoch_steps = {:d}\n'.format(self.epoch_steps)) - text_file.write('validation_size = {:d}\n'.format(self.validation_size)) - text_file.write('checkpoint_gap = {:d}\n'.format(self.checkpoint_gap)) - + text_file.write("epoch_steps = {:d}\n".format(self.epoch_steps)) + text_file.write("validation_size = {:d}\n".format(self.validation_size)) + text_file.write("checkpoint_gap = {:d}\n".format(self.checkpoint_gap)) diff --git a/utils/mayavi_visu.py b/utils/mayavi_visu.py index b1c3821..60dff7f 100644 --- a/utils/mayavi_visu.py +++ b/utils/mayavi_visu.py @@ -23,20 +23,12 @@ # Basic libs -import torch import numpy as np -from sklearn.neighbors import KDTree -from os import 
makedirs, remove, rename, listdir -from os.path import exists, join -import time -import sys # PLY reader -from utils.ply import write_ply, read_ply # Configuration class -from utils.config import Config def show_ModelNet_models(all_points): @@ -47,7 +39,7 @@ def show_ModelNet_models(all_points): ########################### # Create figure for features - fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800)) + fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800)) fig1.scene.parallel_projection = False # Indices @@ -55,7 +47,6 @@ def show_ModelNet_models(all_points): file_i = 0 def update_scene(): - # clear figure mlab.clf(fig1) @@ -66,17 +57,19 @@ def show_ModelNet_models(all_points): points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 # Show point clouds colorized with activations - activations = mlab.points3d(points[:, 0], - points[:, 1], - points[:, 2], - points[:, 2], - scale_factor=3.0, - scale_mode='none', - figure=fig1) + mlab.points3d( + points[:, 0], + points[:, 1], + points[:, 2], + points[:, 2], + scale_factor=3.0, + scale_mode="none", + figure=fig1, + ) # New title mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01) - text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' + text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->" mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) mlab.orientation_axes() @@ -85,13 +78,11 @@ def show_ModelNet_models(all_points): def keyboard_callback(vtk_obj, event): global file_i - if vtk_obj.GetKeyCode() in ['g', 'G']: - + if vtk_obj.GetKeyCode() in ["g", "G"]: file_i = (file_i - 1) % len(all_points) update_scene() - elif vtk_obj.GetKeyCode() in ['h', 'H']: - + elif vtk_obj.GetKeyCode() in ["h", "H"]: file_i = (file_i + 1) % len(all_points) update_scene() @@ -99,7 +90,7 @@ def show_ModelNet_models(all_points): # Draw a first plot update_scene() - fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) + fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback) mlab.show() @@ -111,7 +102,7 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None): ########################### # Create figure for features - fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800)) + fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800)) fig1.scene.parallel_projection = False if cloud_labels is None: @@ -123,7 +114,6 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None): show_normals = True def update_scene(): - # clear figure mlab.clf(fig1) @@ -139,27 +129,31 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None): points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 # Show point clouds colorized with activations - activations = mlab.points3d(points[:, 0], - points[:, 1], - points[:, 2], - labels, - scale_factor=3.0, - scale_mode='none', - figure=fig1) + mlab.points3d( + points[:, 0], + points[:, 1], + points[:, 2], + labels, + scale_factor=3.0, + scale_mode="none", + figure=fig1, + ) if normals is not None and show_normals: - activations = mlab.quiver3d(points[:, 0], - points[:, 1], - points[:, 2], - normals[:, 0], - normals[:, 1], - normals[:, 2], - scale_factor=10.0, - scale_mode='none', - figure=fig1) + mlab.quiver3d( + points[:, 0], + points[:, 1], + points[:, 2], + normals[:, 0], + normals[:, 1], + normals[:, 2], + scale_factor=10.0, + scale_mode="none", + figure=fig1, + ) # New title mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01) - text = 
'<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' + text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->" mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) mlab.orientation_axes() @@ -168,15 +162,15 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None): def keyboard_callback(vtk_obj, event): global file_i, show_normals - if vtk_obj.GetKeyCode() in ['g', 'G']: + if vtk_obj.GetKeyCode() in ["g", "G"]: file_i = (file_i - 1) % len(clouds) update_scene() - elif vtk_obj.GetKeyCode() in ['h', 'H']: + elif vtk_obj.GetKeyCode() in ["h", "H"]: file_i = (file_i + 1) % len(clouds) update_scene() - elif vtk_obj.GetKeyCode() in ['n', 'N']: + elif vtk_obj.GetKeyCode() in ["n", "N"]: show_normals = not show_normals update_scene() @@ -184,7 +178,7 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None): # Draw a first plot update_scene() - fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) + fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback) mlab.show() @@ -196,7 +190,7 @@ def show_neighbors(query, supports, neighbors): ########################### # Create figure for features - fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800)) + fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800)) fig1.scene.parallel_projection = False # Indices @@ -204,7 +198,6 @@ def show_neighbors(query, supports, neighbors): file_i = 0 def update_scene(): - # clear figure mlab.clf(fig1) @@ -212,36 +205,40 @@ def show_neighbors(query, supports, neighbors): p1 = (query * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 p2 = (supports * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 - l1 = p1[:, 2]*0 + l1 = p1[:, 2] * 0 l1[file_i] = 1 - l2 = p2[:, 2]*0 + 2 + l2 = p2[:, 2] * 0 + 2 l2[neighbors[file_i]] = 3 # Show point clouds colorized with activations - activations = mlab.points3d(p1[:, 0], - p1[:, 1], - p1[:, 2], - l1, - scale_factor=2.0, - scale_mode='none', - vmin=0.0, - vmax=3.0, - figure=fig1) + mlab.points3d( + p1[:, 0], + p1[:, 1], + p1[:, 2], + l1, + scale_factor=2.0, + scale_mode="none", + vmin=0.0, + vmax=3.0, + figure=fig1, + ) - activations = mlab.points3d(p2[:, 0], - p2[:, 1], - p2[:, 2], - l2, - scale_factor=3.0, - scale_mode='none', - vmin=0.0, - vmax=3.0, - figure=fig1) + mlab.points3d( + p2[:, 0], + p2[:, 1], + p2[:, 2], + l2, + scale_factor=3.0, + scale_mode="none", + vmin=0.0, + vmax=3.0, + figure=fig1, + ) # New title mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01) - text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' + text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->" mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) mlab.orientation_axes() @@ -250,13 +247,11 @@ def show_neighbors(query, supports, neighbors): def keyboard_callback(vtk_obj, event): global file_i - if vtk_obj.GetKeyCode() in ['g', 'G']: - + if vtk_obj.GetKeyCode() in ["g", "G"]: file_i = (file_i - 1) % len(query) update_scene() - elif vtk_obj.GetKeyCode() in ['h', 'H']: - + elif vtk_obj.GetKeyCode() in ["h", "H"]: file_i = (file_i + 1) % len(query) update_scene() @@ -264,7 +259,7 @@ def show_neighbors(query, supports, neighbors): # Draw a first plot update_scene() - fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) + fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback) mlab.show() @@ -276,7 +271,7 @@ def show_input_batch(batch): ########################### # Create figure for features - fig1 = 
mlab.figure('Input', bgcolor=(1, 1, 1), size=(1000, 800)) + fig1 = mlab.figure("Input", bgcolor=(1, 1, 1), size=(1000, 800)) fig1.scene.parallel_projection = False # Unstack batch @@ -292,18 +287,20 @@ def show_input_batch(batch): show_pools = False def update_scene(): - # clear figure mlab.clf(fig1) # Rescale points for visu p = (all_points[l_i][b_i] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 - labels = p[:, 2]*0 + labels = p[:, 2] * 0 if show_pools: - p2 = (all_points[l_i+1][b_i][neighb_i:neighb_i+1] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 + p2 = ( + all_points[l_i + 1][b_i][neighb_i : neighb_i + 1] * 1.5 + + np.array([1.0, 1.0, 1.0]) + ) * 50.0 p = np.vstack((p, p2)) - labels = np.hstack((labels, np.ones((1,), dtype=np.int32)*3)) + labels = np.hstack((labels, np.ones((1,), dtype=np.int32) * 3)) pool_inds = all_pools[l_i][b_i][neighb_i] pool_inds = pool_inds[pool_inds >= 0] labels[pool_inds] = 2 @@ -314,16 +311,17 @@ def show_input_batch(batch): labels[neighb_i] = 3 # Show point clouds colorized with activations - mlab.points3d(p[:, 0], - p[:, 1], - p[:, 2], - labels, - scale_factor=2.0, - scale_mode='none', - vmin=0.0, - vmax=3.0, - figure=fig1) - + mlab.points3d( + p[:, 0], + p[:, 1], + p[:, 2], + labels, + scale_factor=2.0, + scale_mode="none", + vmin=0.0, + vmax=3.0, + figure=fig1, + ) """ mlab.points3d(p[-2:, 0], @@ -350,12 +348,16 @@ def show_input_batch(batch): """ # New title - title_str = '<([) b_i={:d} (])> <(,) l_i={:d} (.)> <(N) n_i={:d} (M)>'.format(b_i, l_i, neighb_i) + title_str = ( + "<([) b_i={:d} (])> <(,) l_i={:d} (.)> <(N) n_i={:d} (M)>".format( + b_i, l_i, neighb_i + ) + ) mlab.title(title_str, color=(0, 0, 0), size=0.3, height=0.90) if show_pools: - text = 'pools (switch with G)' + text = "pools (switch with G)" else: - text = 'neighbors (switch with G)' + text = "neighbors (switch with G)" mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.3) mlab.orientation_axes() @@ -364,17 +366,17 @@ def show_input_batch(batch): def keyboard_callback(vtk_obj, event): global b_i, l_i, neighb_i, show_pools - if vtk_obj.GetKeyCode() in ['[', '{']: + if vtk_obj.GetKeyCode() in ["[", "{"]: b_i = (b_i - 1) % len(all_points[l_i]) neighb_i = 0 update_scene() - elif vtk_obj.GetKeyCode() in [']', '}']: + elif vtk_obj.GetKeyCode() in ["]", "}"]: b_i = (b_i + 1) % len(all_points[l_i]) neighb_i = 0 update_scene() - elif vtk_obj.GetKeyCode() in [',', '<']: + elif vtk_obj.GetKeyCode() in [",", "<"]: if show_pools: l_i = (l_i - 1) % (len(all_points) - 1) else: @@ -382,7 +384,7 @@ def show_input_batch(batch): neighb_i = 0 update_scene() - elif vtk_obj.GetKeyCode() in ['.', '>']: + elif vtk_obj.GetKeyCode() in [".", ">"]: if show_pools: l_i = (l_i + 1) % (len(all_points) - 1) else: @@ -390,15 +392,15 @@ def show_input_batch(batch): neighb_i = 0 update_scene() - elif vtk_obj.GetKeyCode() in ['n', 'N']: + elif vtk_obj.GetKeyCode() in ["n", "N"]: neighb_i = (neighb_i - 1) % all_points[l_i][b_i].shape[0] update_scene() - elif vtk_obj.GetKeyCode() in ['m', 'M']: + elif vtk_obj.GetKeyCode() in ["m", "M"]: neighb_i = (neighb_i + 1) % all_points[l_i][b_i].shape[0] update_scene() - elif vtk_obj.GetKeyCode() in ['g', 'G']: + elif vtk_obj.GetKeyCode() in ["g", "G"]: if l_i < len(all_points) - 1: show_pools = not show_pools neighb_i = 0 @@ -408,29 +410,5 @@ def show_input_batch(batch): # Draw a first plot update_scene() - fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) + fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback) mlab.show() - - - - - - - - - - - - - - - 
- - - - - - - - - diff --git a/utils/metrics.py b/utils/metrics.py index 22e6bfb..2c79b32 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -32,6 +32,7 @@ import numpy as np # \***************/ # + def fast_confusion(true, pred, label_values=None): """ Fast confusion matrix (100x faster than Scikit learn). But only works if labels are la @@ -45,13 +46,25 @@ def fast_confusion(true, pred, label_values=None): true = np.squeeze(true) pred = np.squeeze(pred) if len(true.shape) != 1: - raise ValueError('Truth values are stored in a {:d}D array instead of 1D array'. format(len(true.shape))) + raise ValueError( + "Truth values are stored in a {:d}D array instead of 1D array".format( + len(true.shape) + ) + ) if len(pred.shape) != 1: - raise ValueError('Prediction values are stored in a {:d}D array instead of 1D array'. format(len(pred.shape))) + raise ValueError( + "Prediction values are stored in a {:d}D array instead of 1D array".format( + len(pred.shape) + ) + ) if true.dtype not in [np.int32, np.int64]: - raise ValueError('Truth values are {:s} instead of int32 or int64'.format(true.dtype)) + raise ValueError( + "Truth values are {:s} instead of int32 or int64".format(true.dtype) + ) if pred.dtype not in [np.int32, np.int64]: - raise ValueError('Prediction values are {:s} instead of int32 or int64'.format(pred.dtype)) + raise ValueError( + "Prediction values are {:s} instead of int32 or int64".format(pred.dtype) + ) true = true.astype(np.int32) pred = pred.astype(np.int32) @@ -62,9 +75,13 @@ def fast_confusion(true, pred, label_values=None): else: # Ensure they are good if given if label_values.dtype not in [np.int32, np.int64]: - raise ValueError('label values are {:s} instead of int32 or int64'.format(label_values.dtype)) + raise ValueError( + "label values are {:s} instead of int32 or int64".format( + label_values.dtype + ) + ) if len(np.unique(label_values)) < len(label_values): - raise ValueError('Given labels are not unique') + raise ValueError("Given labels are not unique") # Sort labels label_values = np.sort(label_values) @@ -72,33 +89,32 @@ def fast_confusion(true, pred, label_values=None): # Get the number of classes num_classes = len(label_values) - #print(num_classes) - #print(label_values) - #print(np.max(true)) - #print(np.max(pred)) - #print(np.max(true * num_classes + pred)) + # print(num_classes) + # print(label_values) + # print(np.max(true)) + # print(np.max(pred)) + # print(np.max(true * num_classes + pred)) # Start confusion computations if label_values[0] == 0 and label_values[-1] == num_classes - 1: - # Vectorized confusion vec_conf = np.bincount(true * num_classes + pred) # Add possible missing values due to classes not being in pred or true - #print(vec_conf.shape) - if vec_conf.shape[0] < num_classes ** 2: - vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant') - #print(vec_conf.shape) + # print(vec_conf.shape) + if vec_conf.shape[0] < num_classes**2: + vec_conf = np.pad( + vec_conf, (0, num_classes**2 - vec_conf.shape[0]), "constant" + ) + # print(vec_conf.shape) # Reshape confusion in a matrix return vec_conf.reshape((num_classes, num_classes)) - else: - # Ensure no negative classes if label_values[0] < 0: - raise ValueError('Unsupported negative classes') + raise ValueError("Unsupported negative classes") # Get the data in [0,num_classes[ label_map = np.zeros((label_values[-1] + 1,), dtype=np.int32) @@ -112,12 +128,15 @@ def fast_confusion(true, pred, label_values=None): vec_conf = np.bincount(true * num_classes + pred) # Add 
possible missing values due to classes not being in pred or true - if vec_conf.shape[0] < num_classes ** 2: - vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant') + if vec_conf.shape[0] < num_classes**2: + vec_conf = np.pad( + vec_conf, (0, num_classes**2 - vec_conf.shape[0]), "constant" + ) # Reshape confusion in a matrix return vec_conf.reshape((num_classes, num_classes)) + def metrics(confusions, ignore_unclassified=False): """ Computes different metrics from confusion matrices. @@ -128,7 +147,7 @@ def metrics(confusions, ignore_unclassified=False): """ # If the first class (often "unclassified") should be ignored, erase it from the confusion. - if (ignore_unclassified): + if ignore_unclassified: confusions[..., 0, :] = 0 confusions[..., :, 0] = 0 @@ -176,7 +195,9 @@ def smooth_metrics(confusions, smooth_n=0, ignore_unclassified=False): for epoch in range(confusions.shape[-3]): i0 = max(epoch - smooth_n, 0) i1 = min(epoch + smooth_n + 1, confusions.shape[-3]) - smoothed_confusions[..., epoch, :, :] = np.sum(confusions[..., i0:i1, :, :], axis=-3) + smoothed_confusions[..., epoch, :, :] = np.sum( + confusions[..., i0:i1, :, :], axis=-3 + ) # Compute TP, FP, FN. This assume that the second to last axis counts the truths (like the first axis of a # confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix) diff --git a/utils/ply.py b/utils/ply.py index 0f5bfd3..8bcc395 100644 --- a/utils/ply.py +++ b/utils/ply.py @@ -28,28 +28,29 @@ import sys # Define PLY types -ply_dtypes = dict([ - (b'int8', 'i1'), - (b'char', 'i1'), - (b'uint8', 'u1'), - (b'uchar', 'u1'), - (b'int16', 'i2'), - (b'short', 'i2'), - (b'uint16', 'u2'), - (b'ushort', 'u2'), - (b'int32', 'i4'), - (b'int', 'i4'), - (b'uint32', 'u4'), - (b'uint', 'u4'), - (b'float32', 'f4'), - (b'float', 'f4'), - (b'float64', 'f8'), - (b'double', 'f8') -]) +ply_dtypes = dict( + [ + (b"int8", "i1"), + (b"char", "i1"), + (b"uint8", "u1"), + (b"uchar", "u1"), + (b"int16", "i2"), + (b"short", "i2"), + (b"uint16", "u2"), + (b"ushort", "u2"), + (b"int32", "i4"), + (b"int", "i4"), + (b"uint32", "u4"), + (b"uint", "u4"), + (b"float32", "f4"), + (b"float", "f4"), + (b"float64", "f8"), + (b"double", "f8"), + ] +) # Numpy reader format -valid_formats = {'ascii': '', 'binary_big_endian': '>', - 'binary_little_endian': '<'} +valid_formats = {"ascii": "", "binary_big_endian": ">", "binary_little_endian": "<"} # ---------------------------------------------------------------------------------------------------------------------- @@ -65,14 +66,14 @@ def parse_header(plyfile, ext): properties = [] num_points = None - while b'end_header' not in line and line != b'': + while b"end_header" not in line and line != b"": line = plyfile.readline() - if b'element' in line: + if b"element" in line: line = line.split() num_points = int(line[2]) - elif b'property' in line: + elif b"property" in line: line = line.split() properties.append((line[2].decode(), ext + ply_dtypes[line[1]])) @@ -87,28 +88,27 @@ def parse_mesh_header(plyfile, ext): num_faces = None current_element = None - - while b'end_header' not in line and line != b'': + while b"end_header" not in line and line != b"": line = plyfile.readline() # Find point element - if b'element vertex' in line: - current_element = 'vertex' + if b"element vertex" in line: + current_element = "vertex" line = line.split() num_points = int(line[2]) - elif b'element face' in line: - current_element = 'face' + elif b"element face" in line: + 
current_element = "face" line = line.split() num_faces = int(line[2]) - elif b'property' in line: - if current_element == 'vertex': + elif b"property" in line: + if current_element == "vertex": line = line.split() vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]])) - elif current_element == 'vertex': - if not line.startswith('property list uchar int'): - raise ValueError('Unsupported faces property : ' + line) + elif current_element == "vertex": + if not line.startswith("property list uchar int"): + raise ValueError("Unsupported faces property : " + line) return num_points, num_faces, vertex_properties @@ -140,7 +140,7 @@ def read_ply(filename, triangular_mesh=False): >>> data = read_ply('example.ply') >>> values = data['values'] array([0, 0, 1, 1, 0]) - + >>> points = np.vstack((data['x'], data['y'], data['z'])).T array([[ 0.466 0.595 0.324] [ 0.538 0.407 0.654] @@ -150,24 +150,21 @@ def read_ply(filename, triangular_mesh=False): """ - with open(filename, 'rb') as plyfile: - - + with open(filename, "rb") as plyfile: # Check if the file start with ply - if b'ply' not in plyfile.readline(): - raise ValueError('The file does not start whith the word ply') + if b"ply" not in plyfile.readline(): + raise ValueError("The file does not start whith the word ply") # get binary_little/big or ascii fmt = plyfile.readline().split()[1].decode() if fmt == "ascii": - raise ValueError('The file is not binary') + raise ValueError("The file is not binary") # get extension for building the numpy dtypes ext = valid_formats[fmt] # PointCloud reader vs mesh reader if triangular_mesh: - # Parse header num_points, num_faces, properties = parse_mesh_header(plyfile, ext) @@ -175,18 +172,19 @@ def read_ply(filename, triangular_mesh=False): vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points) # Get face data - face_properties = [('k', ext + 'u1'), - ('v1', ext + 'i4'), - ('v2', ext + 'i4'), - ('v3', ext + 'i4')] + face_properties = [ + ("k", ext + "u1"), + ("v1", ext + "i4"), + ("v2", ext + "i4"), + ("v3", ext + "i4"), + ] faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces) # Return vertex data and concatenated faces - faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T + faces = np.vstack((faces_data["v1"], faces_data["v2"], faces_data["v3"])).T data = [vertex_data, faces] else: - # Parse header num_points, properties = parse_header(plyfile, ext) @@ -197,18 +195,17 @@ def read_ply(filename, triangular_mesh=False): def header_properties(field_list, field_names): - # List of lines to write lines = [] # First line describing element vertex - lines.append('element vertex %d' % field_list[0].shape[0]) + lines.append("element vertex %d" % field_list[0].shape[0]) # Properties lines i = 0 for fields in field_list: for field in fields.T: - lines.append('property %s %s' % (field.dtype.name, field_names[i])) + lines.append("property %s %s" % (field.dtype.name, field_names[i])) i += 1 return lines @@ -221,16 +218,16 @@ def write_ply(filename, field_list, field_names, triangular_faces=None): Parameters ---------- filename : string - the name of the file to which the data is saved. A '.ply' extension will be appended to the + the name of the file to which the data is saved. A '.ply' extension will be appended to the file name if it does no already have one. field_list : list, tuple, numpy array - the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a - tuple of numpy arrays. 
Each 1D numpy array and each column of 2D numpy arrays are considered - as one field. + the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a + tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered + as one field. field_names : list - the name of each fields as a list of strings. Has to be the same length as the number of + the name of each fields as a list of strings. Has to be the same length as the number of fields. Examples @@ -248,57 +245,59 @@ def write_ply(filename, field_list, field_names, triangular_faces=None): """ # Format list input to the right form - field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,)) + field_list = ( + list(field_list) + if (type(field_list) == list or type(field_list) == tuple) + else list((field_list,)) + ) for i, field in enumerate(field_list): if field.ndim < 2: field_list[i] = field.reshape(-1, 1) if field.ndim > 2: - print('fields have more than 2 dimensions') - return False + print("fields have more than 2 dimensions") + return False # check all fields have the same number of data n_points = [field.shape[0] for field in field_list] if not np.all(np.equal(n_points, n_points[0])): - print('wrong field dimensions') - return False + print("wrong field dimensions") + return False # Check if field_names and field_list have same nb of column n_fields = np.sum([field.shape[1] for field in field_list]) - if (n_fields != len(field_names)): - print('wrong number of field names') + if n_fields != len(field_names): + print("wrong number of field names") return False # Add extension if not there - if not filename.endswith('.ply'): - filename += '.ply' + if not filename.endswith(".ply"): + filename += ".ply" # open in text mode to write the header - with open(filename, 'w') as plyfile: - + with open(filename, "w") as plyfile: # First magical word - header = ['ply'] + header = ["ply"] # Encoding format - header.append('format binary_' + sys.byteorder + '_endian 1.0') + header.append("format binary_" + sys.byteorder + "_endian 1.0") # Points properties description header.extend(header_properties(field_list, field_names)) # Add faces if needded if triangular_faces is not None: - header.append('element face {:d}'.format(triangular_faces.shape[0])) - header.append('property list uchar int vertex_indices') + header.append("element face {:d}".format(triangular_faces.shape[0])) + header.append("property list uchar int vertex_indices") # End of header - header.append('end_header') + header.append("end_header") # Write all lines for line in header: plyfile.write("%s\n" % line) # open in binary/append to use tofile - with open(filename, 'ab') as plyfile: - + with open(filename, "ab") as plyfile: # Create a structured array i = 0 type_list = [] @@ -317,19 +316,19 @@ def write_ply(filename, field_list, field_names, triangular_faces=None): if triangular_faces is not None: triangular_faces = triangular_faces.astype(np.int32) - type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)] + type_list = [("k", "uint8")] + [(str(ind), "int32") for ind in range(3)] data = np.empty(triangular_faces.shape[0], dtype=type_list) - data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8) - data['0'] = triangular_faces[:, 0] - data['1'] = triangular_faces[:, 1] - data['2'] = triangular_faces[:, 2] + data["k"] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8) + data["0"] = triangular_faces[:, 0] + data["1"] = 
triangular_faces[:, 1] + data["2"] = triangular_faces[:, 2] data.tofile(plyfile) return True def describe_element(name, df): - """ Takes the columns of the dataframe and builds a ply-like description + """Takes the columns of the dataframe and builds a ply-like description Parameters ---------- @@ -340,16 +339,16 @@ def describe_element(name, df): ------- element: list[str] """ - property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'} - element = ['element ' + name + ' ' + str(len(df))] + property_formats = {"f": "float", "u": "uchar", "i": "int"} + element = ["element " + name + " " + str(len(df))] - if name == 'face': + if name == "face": element.append("property list uchar int points_indices") else: for i in range(len(df.columns)): # get first letter of dtype to infer format f = property_formats[str(df.dtypes[i])[0]] - element.append('property ' + f + ' ' + df.columns.values[i]) + element.append("property " + f + " " + df.columns.values[i]) - return element \ No newline at end of file + return element diff --git a/utils/tester.py b/utils/tester.py index f2f0357..4c1e96a 100644 --- a/utils/tester.py +++ b/utils/tester.py @@ -24,22 +24,18 @@ # Basic libs import torch -import torch.nn as nn import numpy as np -from os import makedirs, listdir +from os import makedirs from os.path import exists, join import time -import json -from sklearn.neighbors import KDTree # PLY reader -from utils.ply import read_ply, write_ply +from utils.ply import write_ply # Metrics from utils.metrics import IoU_from_confusions, fast_confusion -from sklearn.metrics import confusion_matrix -#from utils.visualizer import show_ModelNet_models +# from utils.visualizer import show_ModelNet_models # ---------------------------------------------------------------------------------------------------------------------- # @@ -49,12 +45,10 @@ from sklearn.metrics import confusion_matrix class ModelTester: - # Initialization methods # ------------------------------------------------------------------------------------------------------------------ def __init__(self, net, chkp_path=None, on_gpu=True): - ############ # Parameters ############ @@ -71,8 +65,8 @@ class ModelTester: ########################## checkpoint = torch.load(chkp_path) - net.load_state_dict(checkpoint['model_state_dict']) - self.epoch = checkpoint['epoch'] + net.load_state_dict(checkpoint["model_state_dict"]) + self.epoch = checkpoint["epoch"] net.eval() print("Model and training state restored.") @@ -82,7 +76,6 @@ class ModelTester: # ------------------------------------------------------------------------------------------------------------------ def classification_test(self, net, test_loader, config, num_votes=100, debug=False): - ############ # Initialize ############ @@ -91,7 +84,6 @@ class ModelTester: softmax = torch.nn.Softmax(1) # Number of classes including ignored labels - nc_tot = test_loader.dataset.num_classes # Number of classes predicted by the model nc_model = config.num_classes @@ -104,7 +96,6 @@ class ModelTester: mean_dt = np.zeros(1) last_display = time.time() while np.min(self.test_counts) < num_votes: - # Run model on all test examples # ****************************** @@ -115,12 +106,11 @@ class ModelTester: # Start validation loop for batch in test_loader: - # New time t = t[-1:] t += [time.time()] - if 'cuda' in self.device.type: + if "cuda" in self.device.type: batch.to(self.device) # Forward pass @@ -131,7 +121,7 @@ class ModelTester: targets += [batch.labels.cpu().numpy()] obj_inds += [batch.model_inds.cpu().numpy()] - if 
'cuda' in self.device.type: + if "cuda" in self.device.type: torch.cuda.synchronize(self.device) # Average timing @@ -141,22 +131,28 @@ class ModelTester: # Display if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'Test vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})' - print(message.format(np.min(self.test_counts), - 100 * len(obj_inds) / config.validation_size, - 1000 * (mean_dt[0]), - 1000 * (mean_dt[1]))) + message = "Test vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})" + print( + message.format( + np.min(self.test_counts), + 100 * len(obj_inds) / config.validation_size, + 1000 * (mean_dt[0]), + 1000 * (mean_dt[1]), + ) + ) # Stack all validation predictions probs = np.vstack(probs) targets = np.hstack(targets) obj_inds = np.hstack(obj_inds) if np.any(test_loader.dataset.input_labels[obj_inds] != targets): - raise ValueError('wrong object indices') + raise ValueError("wrong object indices") # Compute incremental average (predictions are always ordered) self.test_counts[obj_inds] += 1 - self.test_probs[obj_inds] += (probs - self.test_probs[obj_inds]) / (self.test_counts[obj_inds]) + self.test_probs[obj_inds] += (probs - self.test_probs[obj_inds]) / ( + self.test_counts[obj_inds] + ) # Save/Display temporary results # ****************************** @@ -164,16 +160,20 @@ class ModelTester: test_labels = np.array(test_loader.dataset.label_values) # Compute classification results - C1 = fast_confusion(test_loader.dataset.input_labels, - np.argmax(self.test_probs, axis=1), - test_labels) + C1 = fast_confusion( + test_loader.dataset.input_labels, + np.argmax(self.test_probs, axis=1), + test_labels, + ) ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6) - print('Test Accuracy = {:.1f}%'.format(ACC)) + print("Test Accuracy = {:.1f}%".format(ACC)) return - def cloud_segmentation_test(self, net, test_loader, config, num_votes=100, debug=False): + def cloud_segmentation_test( + self, net, test_loader, config, num_votes=100, debug=False + ): """ Test method for cloud segmentation models """ @@ -188,36 +188,41 @@ class ModelTester: softmax = torch.nn.Softmax(1) # Number of classes including ignored labels - nc_tot = test_loader.dataset.num_classes # Number of classes predicted by the model nc_model = config.num_classes # Initiate global prediction over test clouds - self.test_probs = [np.zeros((l.shape[0], nc_model)) for l in test_loader.dataset.input_labels] + self.test_probs = [ + np.zeros((l.shape[0], nc_model)) for l in test_loader.dataset.input_labels + ] # Test saving path if config.saving: - test_path = join('test', config.saving_path.split('/')[-1]) + test_path = join("test", config.saving_path.split("/")[-1]) if not exists(test_path): makedirs(test_path) - if not exists(join(test_path, 'predictions')): - makedirs(join(test_path, 'predictions')) - if not exists(join(test_path, 'probs')): - makedirs(join(test_path, 'probs')) - if not exists(join(test_path, 'potentials')): - makedirs(join(test_path, 'potentials')) + if not exists(join(test_path, "predictions")): + makedirs(join(test_path, "predictions")) + if not exists(join(test_path, "probs")): + makedirs(join(test_path, "probs")) + if not exists(join(test_path, "potentials")): + makedirs(join(test_path, "potentials")) else: test_path = None # If on validation directly compute score - if test_loader.dataset.set == 'validation': + if test_loader.dataset.set == "validation": val_proportions = np.zeros(nc_model, dtype=np.float32) i = 0 for label_value in test_loader.dataset.label_values: if label_value not in 
test_loader.dataset.ignored_labels: - val_proportions[i] = np.sum([np.sum(labels == label_value) - for labels in test_loader.dataset.validation_labels]) + val_proportions[i] = np.sum( + [ + np.sum(labels == label_value) + for labels in test_loader.dataset.validation_labels + ] + ) i += 1 else: val_proportions = None @@ -235,17 +240,16 @@ class ModelTester: # Start test loop while True: - print('Initialize workers') + print("Initialize workers") for i, batch in enumerate(test_loader): - # New time t = t[-1:] t += [time.time()] if i == 0: - print('Done in {:.1f}s'.format(t[1] - t[0])) + print("Done in {:.1f}s".format(t[1] - t[0])) - if 'cuda' in self.device.type: + if "cuda" in self.device.type: batch.to(self.device) # Forward pass @@ -266,20 +270,25 @@ class ModelTester: i0 = 0 for b_i, length in enumerate(lengths): - # Get prediction - points = s_points[i0:i0 + length] - probs = stacked_probs[i0:i0 + length] - inds = in_inds[i0:i0 + length] + points = s_points[i0 : i0 + length] + probs = stacked_probs[i0 : i0 + length] + inds = in_inds[i0 : i0 + length] c_i = cloud_inds[b_i] if 0 < test_radius_ratio < 1: - mask = np.sum(points ** 2, axis=1) < (test_radius_ratio * config.in_radius) ** 2 + mask = ( + np.sum(points**2, axis=1) + < (test_radius_ratio * config.in_radius) ** 2 + ) inds = inds[mask] probs = probs[mask] # Update current probs in whole cloud - self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs + self.test_probs[c_i][inds] = ( + test_smooth * self.test_probs[c_i][inds] + + (1 - test_smooth) * probs + ) i0 += length # Average timing @@ -292,50 +301,69 @@ class ModelTester: # Display if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})' - print(message.format(test_epoch, i, - 100 * i / config.validation_size, - 1000 * (mean_dt[0]), - 1000 * (mean_dt[1]), - 1000 * (mean_dt[2]))) + message = ( + "e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})" + ) + print( + message.format( + test_epoch, + i, + 100 * i / config.validation_size, + 1000 * (mean_dt[0]), + 1000 * (mean_dt[1]), + 1000 * (mean_dt[2]), + ) + ) # Update minimum od potentials new_min = torch.min(test_loader.dataset.min_potentials) - print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min)) - #print([np.mean(pots) for pots in test_loader.dataset.potentials]) + print( + "Test epoch {:d}, end. 
Min potential = {:.1f}".format( + test_epoch, new_min + ) + ) + # print([np.mean(pots) for pots in test_loader.dataset.potentials]) # Save predicted cloud if last_min + 1 < new_min: - # Update last_min last_min += 1 # Show vote results (On subcloud so it is not the good values here) - if test_loader.dataset.set == 'validation': - print('\nConfusion on sub clouds') + if test_loader.dataset.set == "validation": + print("\nConfusion on sub clouds") Confs = [] for i, file_path in enumerate(test_loader.dataset.files): - # Insert false columns for ignored labels probs = np.array(self.test_probs[i], copy=True) - for l_ind, label_value in enumerate(test_loader.dataset.label_values): + for l_ind, label_value in enumerate( + test_loader.dataset.label_values + ): if label_value in test_loader.dataset.ignored_labels: probs = np.insert(probs, l_ind, 0, axis=1) # Predicted labels - preds = test_loader.dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32) + preds = test_loader.dataset.label_values[ + np.argmax(probs, axis=1) + ].astype(np.int32) # Targets targets = test_loader.dataset.input_labels[i] # Confs - Confs += [fast_confusion(targets, preds, test_loader.dataset.label_values)] + Confs += [ + fast_confusion( + targets, preds, test_loader.dataset.label_values + ) + ] # Regroup confusions C = np.sum(np.stack(Confs), axis=0).astype(np.float32) # Remove ignored labels from confusions - for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))): + for l_ind, label_value in reversed( + list(enumerate(test_loader.dataset.label_values)) + ): if label_value in test_loader.dataset.ignored_labels: C = np.delete(C, l_ind, axis=0) C = np.delete(C, l_ind, axis=1) @@ -346,20 +374,18 @@ class ModelTester: # Compute IoUs IoUs = IoU_from_confusions(C) mIoU = np.mean(IoUs) - s = '{:5.2f} | '.format(100 * mIoU) + s = "{:5.2f} | ".format(100 * mIoU) for IoU in IoUs: - s += '{:5.2f} '.format(100 * IoU) - print(s + '\n') + s += "{:5.2f} ".format(100 * IoU) + print(s + "\n") # Save real IoU once in a while if int(np.ceil(new_min)) % 10 == 0: - # Project predictions - print('\nReproject Vote #{:d}'.format(int(np.floor(new_min)))) + print("\nReproject Vote #{:d}".format(int(np.floor(new_min)))) t1 = time.time() proj_probs = [] for i, file_path in enumerate(test_loader.dataset.files): - # print(i, file_path, test_loader.dataset.test_proj[i].shape, self.test_probs[i].shape) # print(test_loader.dataset.test_proj[i].dtype, np.max(test_loader.dataset.test_proj[i])) @@ -370,90 +396,116 @@ class ModelTester: proj_probs += [probs] # Insert false columns for ignored labels - for l_ind, label_value in enumerate(test_loader.dataset.label_values): + for l_ind, label_value in enumerate( + test_loader.dataset.label_values + ): if label_value in test_loader.dataset.ignored_labels: - proj_probs[i] = np.insert(proj_probs[i], l_ind, 0, axis=1) + proj_probs[i] = np.insert( + proj_probs[i], l_ind, 0, axis=1 + ) t2 = time.time() - print('Done in {:.1f} s\n'.format(t2 - t1)) + print("Done in {:.1f} s\n".format(t2 - t1)) # Show vote results - if test_loader.dataset.set == 'validation': - print('Confusion on full clouds') + if test_loader.dataset.set == "validation": + print("Confusion on full clouds") t1 = time.time() Confs = [] for i, file_path in enumerate(test_loader.dataset.files): - # Get the predicted labels - preds = test_loader.dataset.label_values[np.argmax(proj_probs[i], axis=1)].astype(np.int32) + preds = test_loader.dataset.label_values[ + np.argmax(proj_probs[i], axis=1) + ].astype(np.int32) # 
Confusion targets = test_loader.dataset.validation_labels[i] - Confs += [fast_confusion(targets, preds, test_loader.dataset.label_values)] + Confs += [ + fast_confusion( + targets, preds, test_loader.dataset.label_values + ) + ] t2 = time.time() - print('Done in {:.1f} s\n'.format(t2 - t1)) + print("Done in {:.1f} s\n".format(t2 - t1)) # Regroup confusions C = np.sum(np.stack(Confs), axis=0) # Remove ignored labels from confusions - for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))): + for l_ind, label_value in reversed( + list(enumerate(test_loader.dataset.label_values)) + ): if label_value in test_loader.dataset.ignored_labels: C = np.delete(C, l_ind, axis=0) C = np.delete(C, l_ind, axis=1) IoUs = IoU_from_confusions(C) mIoU = np.mean(IoUs) - s = '{:5.2f} | '.format(100 * mIoU) + s = "{:5.2f} | ".format(100 * mIoU) for IoU in IoUs: - s += '{:5.2f} '.format(100 * IoU) - print('-' * len(s)) + s += "{:5.2f} ".format(100 * IoU) + print("-" * len(s)) print(s) - print('-' * len(s) + '\n') + print("-" * len(s) + "\n") # Save predictions - print('Saving clouds') + print("Saving clouds") t1 = time.time() for i, file_path in enumerate(test_loader.dataset.files): - # Get file points = test_loader.dataset.load_evaluation_points(file_path) # Get the predicted labels - preds = test_loader.dataset.label_values[np.argmax(proj_probs[i], axis=1)].astype(np.int32) + preds = test_loader.dataset.label_values[ + np.argmax(proj_probs[i], axis=1) + ].astype(np.int32) # Save plys - cloud_name = file_path.split('/')[-1] - test_name = join(test_path, 'predictions', cloud_name) - write_ply(test_name, - [points, preds], - ['x', 'y', 'z', 'preds']) - test_name2 = join(test_path, 'probs', cloud_name) - prob_names = ['_'.join(test_loader.dataset.label_to_names[label].split()) - for label in test_loader.dataset.label_values] - write_ply(test_name2, - [points, proj_probs[i]], - ['x', 'y', 'z'] + prob_names) + cloud_name = file_path.split("/")[-1] + test_name = join(test_path, "predictions", cloud_name) + write_ply(test_name, [points, preds], ["x", "y", "z", "preds"]) + test_name2 = join(test_path, "probs", cloud_name) + prob_names = [ + "_".join(test_loader.dataset.label_to_names[label].split()) + for label in test_loader.dataset.label_values + ] + write_ply( + test_name2, + [points, proj_probs[i]], + ["x", "y", "z"] + prob_names, + ) # Save potentials - pot_points = np.array(test_loader.dataset.pot_trees[i].data, copy=False) - pot_name = join(test_path, 'potentials', cloud_name) - pots = test_loader.dataset.potentials[i].numpy().astype(np.float32) - write_ply(pot_name, - [pot_points.astype(np.float32), pots], - ['x', 'y', 'z', 'pots']) + pot_points = np.array( + test_loader.dataset.pot_trees[i].data, copy=False + ) + pot_name = join(test_path, "potentials", cloud_name) + pots = ( + test_loader.dataset.potentials[i].numpy().astype(np.float32) + ) + write_ply( + pot_name, + [pot_points.astype(np.float32), pots], + ["x", "y", "z", "pots"], + ) # Save ascii preds - if test_loader.dataset.set == 'test': - if test_loader.dataset.name.startswith('Semantic3D'): - ascii_name = join(test_path, 'predictions', test_loader.dataset.ascii_files[cloud_name]) + if test_loader.dataset.set == "test": + if test_loader.dataset.name.startswith("Semantic3D"): + ascii_name = join( + test_path, + "predictions", + test_loader.dataset.ascii_files[cloud_name], + ) else: - ascii_name = join(test_path, 'predictions', cloud_name[:-4] + '.txt') - np.savetxt(ascii_name, preds, fmt='%d') + ascii_name = join( + 
test_path, "predictions", cloud_name[:-4] + ".txt" + ) + np.savetxt(ascii_name, preds, fmt="%d") t2 = time.time() - print('Done in {:.1f} s\n'.format(t2 - t1)) + print("Done in {:.1f} s\n".format(t2 - t1)) test_epoch += 1 @@ -463,7 +515,9 @@ class ModelTester: return - def slam_segmentation_test(self, net, test_loader, config, num_votes=100, debug=True): + def slam_segmentation_test( + self, net, test_loader, config, num_votes=100, debug=True + ): """ Test method for slam segmentation models """ @@ -485,29 +539,31 @@ class ModelTester: test_path = None report_path = None if config.saving: - test_path = join('test', config.saving_path.split('/')[-1]) + test_path = join("test", config.saving_path.split("/")[-1]) if not exists(test_path): makedirs(test_path) - report_path = join(test_path, 'reports') + report_path = join(test_path, "reports") if not exists(report_path): makedirs(report_path) - if test_loader.dataset.set == 'validation': - for folder in ['val_predictions', 'val_probs']: + if test_loader.dataset.set == "validation": + for folder in ["val_predictions", "val_probs"]: if not exists(join(test_path, folder)): makedirs(join(test_path, folder)) else: - for folder in ['predictions', 'probs']: + for folder in ["predictions", "probs"]: if not exists(join(test_path, folder)): makedirs(join(test_path, folder)) # Init validation container all_f_preds = [] all_f_labels = [] - if test_loader.dataset.set == 'validation': + if test_loader.dataset.set == "validation": for i, seq_frames in enumerate(test_loader.dataset.frames): all_f_preds.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames]) - all_f_labels.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames]) + all_f_labels.append( + [np.zeros((0,), dtype=np.int32) for _ in seq_frames] + ) ##################### # Network predictions @@ -523,17 +579,16 @@ class ModelTester: # Start test loop while True: - print('Initialize workers') + print("Initialize workers") for i, batch in enumerate(test_loader): - # New time t = t[-1:] t += [time.time()] if i == 0: - print('Done in {:.1f}s'.format(t[1] - t[0])) + print("Done in {:.1f}s".format(t[1] - t[0])) - if 'cuda' in self.device.type: + if "cuda" in self.device.type: batch.to(self.device) # Forward pass @@ -555,9 +610,8 @@ class ModelTester: i0 = 0 for b_i, length in enumerate(lengths): - # Get prediction - probs = stk_probs[i0:i0 + length] + probs = stk_probs[i0 : i0 + length] proj_inds = r_inds_list[b_i] proj_mask = r_mask_list[b_i] frame_labels = labels_list[b_i] @@ -573,97 +627,151 @@ class ModelTester: # Save probs in a binary file (uint8 format for lighter weight) seq_name = test_loader.dataset.sequences[s_ind] - if test_loader.dataset.set == 'validation': - folder = 'val_probs' - pred_folder = 'val_predictions' + if test_loader.dataset.set == "validation": + folder = "val_probs" + pred_folder = "val_predictions" else: - folder = 'probs' - pred_folder = 'predictions' - filename = '{:s}_{:07d}.npy'.format(seq_name, f_ind) + folder = "probs" + pred_folder = "predictions" + filename = "{:s}_{:07d}.npy".format(seq_name, f_ind) filepath = join(test_path, folder, filename) if exists(filepath): frame_probs_uint8 = np.load(filepath) else: - frame_probs_uint8 = np.zeros((proj_mask.shape[0], nc_model), dtype=np.uint8) - frame_probs = frame_probs_uint8[proj_mask, :].astype(np.float32) / 255 - frame_probs = test_smooth * frame_probs + (1 - test_smooth) * proj_probs - frame_probs_uint8[proj_mask, :] = (frame_probs * 255).astype(np.uint8) + frame_probs_uint8 = np.zeros( + (proj_mask.shape[0], 
nc_model), dtype=np.uint8 + ) + frame_probs = ( + frame_probs_uint8[proj_mask, :].astype(np.float32) / 255 + ) + frame_probs = ( + test_smooth * frame_probs + (1 - test_smooth) * proj_probs + ) + frame_probs_uint8[proj_mask, :] = (frame_probs * 255).astype( + np.uint8 + ) np.save(filepath, frame_probs_uint8) # Save some prediction in ply format for visual - if test_loader.dataset.set == 'validation': - + if test_loader.dataset.set == "validation": # Insert false columns for ignored labels frame_probs_uint8_bis = frame_probs_uint8.copy() - for l_ind, label_value in enumerate(test_loader.dataset.label_values): + for l_ind, label_value in enumerate( + test_loader.dataset.label_values + ): if label_value in test_loader.dataset.ignored_labels: - frame_probs_uint8_bis = np.insert(frame_probs_uint8_bis, l_ind, 0, axis=1) + frame_probs_uint8_bis = np.insert( + frame_probs_uint8_bis, l_ind, 0, axis=1 + ) # Predicted labels - frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8_bis, - axis=1)].astype(np.int32) + frame_preds = test_loader.dataset.label_values[ + np.argmax(frame_probs_uint8_bis, axis=1) + ].astype(np.int32) # Save some of the frame pots if f_ind % 20 == 0: - seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind]) - velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin') + seq_path = join( + test_loader.dataset.path, + "sequences", + test_loader.dataset.sequences[s_ind], + ) + velo_file = join( + seq_path, + "velodyne", + test_loader.dataset.frames[s_ind][f_ind] + ".bin", + ) frame_points = np.fromfile(velo_file, dtype=np.float32) frame_points = frame_points.reshape((-1, 4)) - predpath = join(test_path, pred_folder, filename[:-4] + '.ply') - #pots = test_loader.dataset.f_potentials[s_ind][f_ind] + predpath = join( + test_path, pred_folder, filename[:-4] + ".ply" + ) + # pots = test_loader.dataset.f_potentials[s_ind][f_ind] pots = np.zeros((0,)) if pots.shape[0] > 0: - write_ply(predpath, - [frame_points[:, :3], frame_labels, frame_preds, pots], - ['x', 'y', 'z', 'gt', 'pre', 'pots']) + write_ply( + predpath, + [ + frame_points[:, :3], + frame_labels, + frame_preds, + pots, + ], + ["x", "y", "z", "gt", "pre", "pots"], + ) else: - write_ply(predpath, - [frame_points[:, :3], frame_labels, frame_preds], - ['x', 'y', 'z', 'gt', 'pre']) + write_ply( + predpath, + [frame_points[:, :3], frame_labels, frame_preds], + ["x", "y", "z", "gt", "pre"], + ) # Also Save lbl probabilities - probpath = join(test_path, folder, filename[:-4] + '_probs.ply') - lbl_names = [test_loader.dataset.label_to_names[l] - for l in test_loader.dataset.label_values - if l not in test_loader.dataset.ignored_labels] - write_ply(probpath, - [frame_points[:, :3], frame_probs_uint8], - ['x', 'y', 'z'] + lbl_names) + probpath = join( + test_path, folder, filename[:-4] + "_probs.ply" + ) + lbl_names = [ + test_loader.dataset.label_to_names[l] + for l in test_loader.dataset.label_values + if l not in test_loader.dataset.ignored_labels + ] + write_ply( + probpath, + [frame_points[:, :3], frame_probs_uint8], + ["x", "y", "z"] + lbl_names, + ) # keep frame preds in memory all_f_preds[s_ind][f_ind] = frame_preds all_f_labels[s_ind][f_ind] = frame_labels else: - # Save some of the frame preds if f_inds[b_i, 1] % 100 == 0: - # Insert false columns for ignored labels - for l_ind, label_value in enumerate(test_loader.dataset.label_values): + for l_ind, label_value in enumerate( + test_loader.dataset.label_values + ): if label_value in 
test_loader.dataset.ignored_labels: - frame_probs_uint8 = np.insert(frame_probs_uint8, l_ind, 0, axis=1) + frame_probs_uint8 = np.insert( + frame_probs_uint8, l_ind, 0, axis=1 + ) # Predicted labels - frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8, - axis=1)].astype(np.int32) + frame_preds = test_loader.dataset.label_values[ + np.argmax(frame_probs_uint8, axis=1) + ].astype(np.int32) # Load points - seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind]) - velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin') + seq_path = join( + test_loader.dataset.path, + "sequences", + test_loader.dataset.sequences[s_ind], + ) + velo_file = join( + seq_path, + "velodyne", + test_loader.dataset.frames[s_ind][f_ind] + ".bin", + ) frame_points = np.fromfile(velo_file, dtype=np.float32) frame_points = frame_points.reshape((-1, 4)) - predpath = join(test_path, pred_folder, filename[:-4] + '.ply') - #pots = test_loader.dataset.f_potentials[s_ind][f_ind] + predpath = join( + test_path, pred_folder, filename[:-4] + ".ply" + ) + # pots = test_loader.dataset.f_potentials[s_ind][f_ind] pots = np.zeros((0,)) if pots.shape[0] > 0: - write_ply(predpath, - [frame_points[:, :3], frame_preds, pots], - ['x', 'y', 'z', 'pre', 'pots']) + write_ply( + predpath, + [frame_points[:, :3], frame_preds, pots], + ["x", "y", "z", "pre", "pots"], + ) else: - write_ply(predpath, - [frame_points[:, :3], frame_preds], - ['x', 'y', 'z', 'pre']) + write_ply( + predpath, + [frame_points[:, :3], frame_preds], + ["x", "y", "z", "pre"], + ) # Stack all prediction for this epoch i0 += length @@ -675,30 +783,45 @@ class ModelTester: # Display if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%' - min_pot = int(torch.floor(torch.min(test_loader.dataset.potentials))) - pot_num = torch.sum(test_loader.dataset.potentials > min_pot + 0.5).type(torch.int32).item() - current_num = pot_num + (i + 1 - config.validation_size) * config.val_batch_num - print(message.format(test_epoch, i, - 100 * i / config.validation_size, - 1000 * (mean_dt[0]), - 1000 * (mean_dt[1]), - 1000 * (mean_dt[2]), - min_pot, - 100.0 * current_num / len(test_loader.dataset.potentials))) - + message = "e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%" + min_pot = int( + torch.floor(torch.min(test_loader.dataset.potentials)) + ) + pot_num = ( + torch.sum(test_loader.dataset.potentials > min_pot + 0.5) + .type(torch.int32) + .item() + ) + current_num = ( + pot_num + + (i + 1 - config.validation_size) * config.val_batch_num + ) + print( + message.format( + test_epoch, + i, + 100 * i / config.validation_size, + 1000 * (mean_dt[0]), + 1000 * (mean_dt[1]), + 1000 * (mean_dt[2]), + min_pot, + 100.0 * current_num / len(test_loader.dataset.potentials), + ) + ) # Update minimum od potentials new_min = torch.min(test_loader.dataset.potentials) - print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min)) + print( + "Test epoch {:d}, end. 
Min potential = {:.1f}".format( + test_epoch, new_min + ) + ) if last_min + 1 < new_min: - # Update last_min last_min += 1 - if test_loader.dataset.set == 'validation' and last_min % 1 == 0: - + if test_loader.dataset.set == "validation" and last_min % 1 == 0: ##################################### # Results on the whole validation set ##################################### @@ -706,13 +829,13 @@ class ModelTester: # Confusions for our subparts of validation set Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32) for i, (preds, truth) in enumerate(zip(predictions, targets)): - # Confusions - Confs[i, :, :] = fast_confusion(truth, preds, test_loader.dataset.label_values).astype(np.int32) - + Confs[i, :, :] = fast_confusion( + truth, preds, test_loader.dataset.label_values + ).astype(np.int32) # Show vote results - print('\nCompute confusion') + print("\nCompute confusion") val_preds = [] val_labels = [] @@ -723,21 +846,25 @@ class ModelTester: val_preds = np.hstack(val_preds) val_labels = np.hstack(val_labels) t2 = time.time() - C_tot = fast_confusion(val_labels, val_preds, test_loader.dataset.label_values) + C_tot = fast_confusion( + val_labels, val_preds, test_loader.dataset.label_values + ) t3 = time.time() - print(' Stacking time : {:.1f}s'.format(t2 - t1)) - print('Confusion time : {:.1f}s'.format(t3 - t2)) + print(" Stacking time : {:.1f}s".format(t2 - t1)) + print("Confusion time : {:.1f}s".format(t3 - t2)) - s1 = '\n' + s1 = "\n" for cc in C_tot: for c in cc: - s1 += '{:7.0f} '.format(c) - s1 += '\n' + s1 += "{:7.0f} ".format(c) + s1 += "\n" if debug: print(s1) # Remove ignored labels from confusions - for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))): + for l_ind, label_value in reversed( + list(enumerate(test_loader.dataset.label_values)) + ): if label_value in test_loader.dataset.ignored_labels: C_tot = np.delete(C_tot, l_ind, axis=0) C_tot = np.delete(C_tot, l_ind, axis=1) @@ -747,21 +874,23 @@ class ModelTester: # Compute IoUs mIoU = np.mean(val_IoUs) - s2 = '{:5.2f} | '.format(100 * mIoU) + s2 = "{:5.2f} | ".format(100 * mIoU) for IoU in val_IoUs: - s2 += '{:5.2f} '.format(100 * IoU) - print(s2 + '\n') + s2 += "{:5.2f} ".format(100 * IoU) + print(s2 + "\n") # Save a report - report_file = join(report_path, 'report_{:04d}.txt'.format(int(np.floor(last_min)))) - str = 'Report of the confusion and metrics\n' - str += '***********************************\n\n\n' - str += 'Confusion matrix:\n\n' + report_file = join( + report_path, "report_{:04d}.txt".format(int(np.floor(last_min))) + ) + str = "Report of the confusion and metrics\n" + str += "***********************************\n\n\n" + str += "Confusion matrix:\n\n" str += s1 - str += '\nIoU values:\n\n' + str += "\nIoU values:\n\n" str += s2 - str += '\n\n' - with open(report_file, 'w') as f: + str += "\n\n" + with open(report_file, "w") as f: f.write(str) test_epoch += 1 @@ -771,28 +900,3 @@ class ModelTester: break return - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/utils/trainer.py b/utils/trainer.py index 4568a60..55ca273 100644 --- a/utils/trainer.py +++ b/utils/trainer.py @@ -24,24 +24,17 @@ # Basic libs import torch -import torch.nn as nn import numpy as np -import pickle -import os from os import makedirs, remove from os.path import exists, join import time -import sys # PLY reader -from utils.ply import read_ply, write_ply +from utils.ply import write_ply # Metrics from utils.metrics import IoU_from_confusions, fast_confusion from utils.config import 
Config -from sklearn.neighbors import KDTree - -from models.blocks import KPConv # ---------------------------------------------------------------------------------------------------------------------- @@ -52,7 +45,6 @@ from models.blocks import KPConv class ModelTrainer: - # Initialization methods # ------------------------------------------------------------------------------------------------------------------ @@ -75,14 +67,15 @@ class ModelTrainer: self.step = 0 # Optimizer with specific learning rate for deformable KPConv - deform_params = [v for k, v in net.named_parameters() if 'offset' in k] - other_params = [v for k, v in net.named_parameters() if 'offset' not in k] + deform_params = [v for k, v in net.named_parameters() if "offset" in k] + other_params = [v for k, v in net.named_parameters() if "offset" not in k] deform_lr = config.learning_rate * config.deform_lr_factor - self.optimizer = torch.optim.SGD([{'params': other_params}, - {'params': deform_params, 'lr': deform_lr}], - lr=config.learning_rate, - momentum=config.momentum, - weight_decay=config.weight_decay) + self.optimizer = torch.optim.SGD( + [{"params": other_params}, {"params": deform_params, "lr": deform_lr}], + lr=config.learning_rate, + momentum=config.momentum, + weight_decay=config.weight_decay, + ) # Choose to train on CPU or GPU if on_gpu and torch.cuda.is_available(): @@ -95,24 +88,26 @@ class ModelTrainer: # Load previous checkpoint ########################## - if (chkp_path is not None): + if chkp_path is not None: if finetune: checkpoint = torch.load(chkp_path) - net.load_state_dict(checkpoint['model_state_dict']) + net.load_state_dict(checkpoint["model_state_dict"]) net.train() print("Model restored and ready for finetuning.") else: checkpoint = torch.load(chkp_path) - net.load_state_dict(checkpoint['model_state_dict']) - self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) - self.epoch = checkpoint['epoch'] + net.load_state_dict(checkpoint["model_state_dict"]) + self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + self.epoch = checkpoint["epoch"] net.train() print("Model and training state restored.") # Path of the result folder if config.saving: if config.saving_path is None: - config.saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime()) + config.saving_path = time.strftime( + "results/Log_%Y-%m-%d_%H-%M-%S", time.gmtime() + ) if not exists(config.saving_path): makedirs(config.saving_path) config.save() @@ -133,17 +128,17 @@ class ModelTrainer: if config.saving: # Training log file - with open(join(config.saving_path, 'training.txt'), "w") as file: - file.write('epochs steps out_loss offset_loss train_accuracy time\n') + with open(join(config.saving_path, "training.txt"), "w") as file: + file.write("epochs steps out_loss offset_loss train_accuracy time\n") # Killing file (simply delete this file when you want to stop the training) - PID_file = join(config.saving_path, 'running_PID.txt') + PID_file = join(config.saving_path, "running_PID.txt") if not exists(PID_file): with open(PID_file, "w") as file: - file.write('Launched with PyCharm') + file.write("Launched with PyCharm") # Checkpoints directory - checkpoint_directory = join(config.saving_path, 'checkpoints') + checkpoint_directory = join(config.saving_path, "checkpoints") if not exists(checkpoint_directory): makedirs(checkpoint_directory) else: @@ -158,14 +153,12 @@ class ModelTrainer: # Start training loop for epoch in range(config.max_epoch): - # Remove File for kill signal if epoch == 
config.max_epoch - 1 and exists(PID_file): remove(PID_file) self.step = 0 for batch in training_loader: - # Check kill signal (running_PID.txt deleted) if config.saving and not exists(PID_file): continue @@ -178,7 +171,7 @@ class ModelTrainer: t = t[-1:] t += [time.time()] - if 'cuda' in self.device.type: + if "cuda" in self.device.type: batch.to(self.device) # zero the parameter gradients @@ -195,11 +188,12 @@ class ModelTrainer: loss.backward() if config.grad_clip_norm > 0: - #torch.nn.utils.clip_grad_norm_(net.parameters(), config.grad_clip_norm) - torch.nn.utils.clip_grad_value_(net.parameters(), config.grad_clip_norm) + # torch.nn.utils.clip_grad_norm_(net.parameters(), config.grad_clip_norm) + torch.nn.utils.clip_grad_value_( + net.parameters(), config.grad_clip_norm + ) self.optimizer.step() - torch.cuda.empty_cache() torch.cuda.synchronize(self.device) @@ -214,25 +208,33 @@ class ModelTrainer: # Console display (only one per second) if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'e{:03d}-i{:04d} => L={:.3f} acc={:3.0f}% / t(ms): {:5.1f} {:5.1f} {:5.1f})' - print(message.format(self.epoch, self.step, - loss.item(), - 100*acc, - 1000 * mean_dt[0], - 1000 * mean_dt[1], - 1000 * mean_dt[2])) + message = "e{:03d}-i{:04d} => L={:.3f} acc={:3.0f}% / t(ms): {:5.1f} {:5.1f} {:5.1f})" + print( + message.format( + self.epoch, + self.step, + loss.item(), + 100 * acc, + 1000 * mean_dt[0], + 1000 * mean_dt[1], + 1000 * mean_dt[2], + ) + ) # Log file if config.saving: - with open(join(config.saving_path, 'training.txt'), "a") as file: - message = '{:d} {:d} {:.3f} {:.3f} {:.3f} {:.3f}\n' - file.write(message.format(self.epoch, - self.step, - net.output_loss, - net.reg_loss, - acc, - t[-1] - t0)) - + with open(join(config.saving_path, "training.txt"), "a") as file: + message = "{:d} {:d} {:.3f} {:.3f} {:.3f} {:.3f}\n" + file.write( + message.format( + self.epoch, + self.step, + net.output_loss, + net.reg_loss, + acc, + t[-1] - t0, + ) + ) self.step += 1 @@ -247,7 +249,7 @@ class ModelTrainer: # Update learning rate if self.epoch in config.lr_decays: for param_group in self.optimizer.param_groups: - param_group['lr'] *= config.lr_decays[self.epoch] + param_group["lr"] *= config.lr_decays[self.epoch] # Update epoch self.epoch += 1 @@ -255,18 +257,22 @@ class ModelTrainer: # Saving if config.saving: # Get current state dict - save_dict = {'epoch': self.epoch, - 'model_state_dict': net.state_dict(), - 'optimizer_state_dict': self.optimizer.state_dict(), - 'saving_path': config.saving_path} + save_dict = { + "epoch": self.epoch, + "model_state_dict": net.state_dict(), + "optimizer_state_dict": self.optimizer.state_dict(), + "saving_path": config.saving_path, + } # Save current state of the network (for restoring purposes) - checkpoint_path = join(checkpoint_directory, 'current_chkp.tar') + checkpoint_path = join(checkpoint_directory, "current_chkp.tar") torch.save(save_dict, checkpoint_path) # Save checkpoints occasionally if (self.epoch + 1) % config.checkpoint_gap == 0: - checkpoint_path = join(checkpoint_directory, 'chkp_{:04d}.tar'.format(self.epoch + 1)) + checkpoint_path = join( + checkpoint_directory, "chkp_{:04d}.tar".format(self.epoch + 1) + ) torch.save(save_dict, checkpoint_path) # Validation @@ -274,24 +280,23 @@ class ModelTrainer: self.validation(net, val_loader, config) net.train() - print('Finished Training') + print("Finished Training") return # Validation methods # 
------------------------------------------------------------------------------------------------------------------ def validation(self, net, val_loader, config: Config): - - if config.dataset_task == 'classification': + if config.dataset_task == "classification": self.object_classification_validation(net, val_loader, config) - elif config.dataset_task == 'segmentation': + elif config.dataset_task == "segmentation": self.object_segmentation_validation(net, val_loader, config) - elif config.dataset_task == 'cloud_segmentation': + elif config.dataset_task == "cloud_segmentation": self.cloud_segmentation_validation(net, val_loader, config) - elif config.dataset_task == 'slam_segmentation': + elif config.dataset_task == "slam_segmentation": self.slam_segmentation_validation(net, val_loader, config) else: - raise ValueError('No validation method implemented for this network type') + raise ValueError("No validation method implemented for this network type") def object_classification_validation(self, net, val_loader, config): """ @@ -313,7 +318,7 @@ class ModelTrainer: softmax = torch.nn.Softmax(1) # Initialize global prediction over all models - if not hasattr(self, 'val_probs'): + if not hasattr(self, "val_probs"): self.val_probs = np.zeros((val_loader.dataset.num_models, nc_model)) ##################### @@ -330,12 +335,11 @@ class ModelTrainer: # Start validation loop for batch in val_loader: - # New time t = t[-1:] t += [time.time()] - if 'cuda' in self.device.type: + if "cuda" in self.device.type: batch.to(self.device) # Forward pass @@ -354,10 +358,14 @@ class ModelTrainer: # Display if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})' - print(message.format(100 * len(obj_inds) / config.validation_size, - 1000 * (mean_dt[0]), - 1000 * (mean_dt[1]))) + message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})" + print( + message.format( + 100 * len(obj_inds) / config.validation_size, + 1000 * (mean_dt[0]), + 1000 * (mean_dt[1]), + ) + ) # Stack all validation predictions probs = np.vstack(probs) @@ -368,7 +376,9 @@ class ModelTrainer: # Voting validation ################### - self.val_probs[obj_inds] = val_smooth * self.val_probs[obj_inds] + (1-val_smooth) * probs + self.val_probs[obj_inds] = ( + val_smooth * self.val_probs[obj_inds] + (1 - val_smooth) * probs + ) ############ # Confusions @@ -377,39 +387,38 @@ class ModelTrainer: validation_labels = np.array(val_loader.dataset.label_values) # Compute classification results - C1 = fast_confusion(targets, - np.argmax(probs, axis=1), - validation_labels) + C1 = fast_confusion(targets, np.argmax(probs, axis=1), validation_labels) # Compute votes confusion - C2 = fast_confusion(val_loader.dataset.input_labels, - np.argmax(self.val_probs, axis=1), - validation_labels) - + C2 = fast_confusion( + val_loader.dataset.input_labels, + np.argmax(self.val_probs, axis=1), + validation_labels, + ) # Saving (optionnal) if config.saving: print("Save confusions") conf_list = [C1, C2] - file_list = ['val_confs.txt', 'vote_confs.txt'] + file_list = ["val_confs.txt", "vote_confs.txt"] for conf, conf_file in zip(conf_list, file_list): test_file = join(config.saving_path, conf_file) if exists(test_file): with open(test_file, "a") as text_file: for line in conf: for value in line: - text_file.write('%d ' % value) - text_file.write('\n') + text_file.write("%d " % value) + text_file.write("\n") else: with open(test_file, "w") as text_file: for line in conf: for value in line: - text_file.write('%d ' % 
value) - text_file.write('\n') + text_file.write("%d " % value) + text_file.write("\n") val_ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6) vote_ACC = 100 * np.sum(np.diag(C2)) / (np.sum(C2) + 1e-6) - print('Accuracies : val = {:.1f}% / vote = {:.1f}%'.format(val_ACC, vote_ACC)) + print("Accuracies : val = {:.1f}% / vote = {:.1f}%".format(val_ACC, vote_ACC)) return C1 @@ -438,19 +447,25 @@ class ModelTrainer: # Number of classes predicted by the model nc_model = config.num_classes - #print(nc_tot) - #print(nc_model) + # print(nc_tot) + # print(nc_model) # Initiate global prediction over validation clouds - if not hasattr(self, 'validation_probs'): - self.validation_probs = [np.zeros((l.shape[0], nc_model)) - for l in val_loader.dataset.input_labels] + if not hasattr(self, "validation_probs"): + self.validation_probs = [ + np.zeros((l.shape[0], nc_model)) + for l in val_loader.dataset.input_labels + ] self.val_proportions = np.zeros(nc_model, dtype=np.float32) i = 0 for label_value in val_loader.dataset.label_values: if label_value not in val_loader.dataset.ignored_labels: - self.val_proportions[i] = np.sum([np.sum(labels == label_value) - for labels in val_loader.dataset.validation_labels]) + self.val_proportions[i] = np.sum( + [ + np.sum(labels == label_value) + for labels in val_loader.dataset.validation_labels + ] + ) i += 1 ##################### @@ -464,17 +479,15 @@ class ModelTrainer: last_display = time.time() mean_dt = np.zeros(1) - t1 = time.time() # Start validation loop for i, batch in enumerate(val_loader): - # New time t = t[-1:] t += [time.time()] - if 'cuda' in self.device.type: + if "cuda" in self.device.type: batch.to(self.device) # Forward pass @@ -493,16 +506,17 @@ class ModelTrainer: i0 = 0 for b_i, length in enumerate(lengths): - # Get prediction - target = labels[i0:i0 + length] - probs = stacked_probs[i0:i0 + length] - inds = in_inds[i0:i0 + length] + target = labels[i0 : i0 + length] + probs = stacked_probs[i0 : i0 + length] + inds = in_inds[i0 : i0 + length] c_i = cloud_inds[b_i] # Update current probs in whole cloud - self.validation_probs[c_i][inds] = val_smooth * self.validation_probs[c_i][inds] \ - + (1 - val_smooth) * probs + self.validation_probs[c_i][inds] = ( + val_smooth * self.validation_probs[c_i][inds] + + (1 - val_smooth) * probs + ) # Stack all prediction for this epoch predictions.append(probs) @@ -516,17 +530,20 @@ class ModelTrainer: # Display if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})' - print(message.format(100 * i / config.validation_size, - 1000 * (mean_dt[0]), - 1000 * (mean_dt[1]))) + message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})" + print( + message.format( + 100 * i / config.validation_size, + 1000 * (mean_dt[0]), + 1000 * (mean_dt[1]), + ) + ) t2 = time.time() # Confusions for our subparts of validation set Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32) for i, (probs, truth) in enumerate(zip(predictions, targets)): - # Insert false columns for ignored labels for l_ind, label_value in enumerate(val_loader.dataset.label_values): if label_value in val_loader.dataset.ignored_labels: @@ -536,8 +553,9 @@ class ModelTrainer: preds = val_loader.dataset.label_values[np.argmax(probs, axis=1)] # Confusions - Confs[i, :, :] = fast_confusion(truth, preds, val_loader.dataset.label_values).astype(np.int32) - + Confs[i, :, :] = fast_confusion( + truth, preds, val_loader.dataset.label_values + ).astype(np.int32) t3 = time.time() @@ -545,7 
+563,9 @@ class ModelTrainer: C = np.sum(Confs, axis=0).astype(np.float32) # Remove ignored labels from confusions - for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))): + for l_ind, label_value in reversed( + list(enumerate(val_loader.dataset.label_values)) + ): if label_value in val_loader.dataset.ignored_labels: C = np.delete(C, l_ind, axis=0) C = np.delete(C, l_ind, axis=1) @@ -553,7 +573,6 @@ class ModelTrainer: # Balance with real validation proportions C *= np.expand_dims(self.val_proportions / (np.sum(C, axis=1) + 1e-6), 1) - t4 = time.time() # Objects IoU @@ -563,15 +582,14 @@ class ModelTrainer: # Saving (optionnal) if config.saving: - # Name of saving file - test_file = join(config.saving_path, 'val_IoUs.txt') + test_file = join(config.saving_path, "val_IoUs.txt") # Line to write: - line = '' + line = "" for IoU in IoUs: - line += '{:.3f} '.format(IoU) - line = line + '\n' + line += "{:.3f} ".format(IoU) + line = line + "\n" # Write in file if exists(test_file): @@ -583,33 +601,36 @@ class ModelTrainer: # Save potentials if val_loader.dataset.use_potentials: - pot_path = join(config.saving_path, 'potentials') + pot_path = join(config.saving_path, "potentials") if not exists(pot_path): makedirs(pot_path) files = val_loader.dataset.files for i, file_path in enumerate(files): - pot_points = np.array(val_loader.dataset.pot_trees[i].data, copy=False) - cloud_name = file_path.split('/')[-1] + pot_points = np.array( + val_loader.dataset.pot_trees[i].data, copy=False + ) + cloud_name = file_path.split("/")[-1] pot_name = join(pot_path, cloud_name) pots = val_loader.dataset.potentials[i].numpy().astype(np.float32) - write_ply(pot_name, - [pot_points.astype(np.float32), pots], - ['x', 'y', 'z', 'pots']) + write_ply( + pot_name, + [pot_points.astype(np.float32), pots], + ["x", "y", "z", "pots"], + ) t6 = time.time() # Print instance mean mIoU = 100 * np.mean(IoUs) - print('{:s} mean IoU = {:.1f}%'.format(config.dataset, mIoU)) + print("{:s} mean IoU = {:.1f}%".format(config.dataset, mIoU)) # Save predicted cloud occasionally if config.saving and (self.epoch + 1) % config.checkpoint_gap == 0: - val_path = join(config.saving_path, 'val_preds_{:d}'.format(self.epoch + 1)) + val_path = join(config.saving_path, "val_preds_{:d}".format(self.epoch + 1)) if not exists(val_path): makedirs(val_path) files = val_loader.dataset.files for i, file_path in enumerate(files): - # Get points points = val_loader.dataset.load_evaluation_points(file_path) @@ -622,34 +643,36 @@ class ModelTrainer: sub_probs = np.insert(sub_probs, l_ind, 0, axis=1) # Get the predicted labels - sub_preds = val_loader.dataset.label_values[np.argmax(sub_probs, axis=1).astype(np.int32)] + sub_preds = val_loader.dataset.label_values[ + np.argmax(sub_probs, axis=1).astype(np.int32) + ] # Reproject preds on the evaluations points preds = (sub_preds[val_loader.dataset.test_proj[i]]).astype(np.int32) # Path of saved validation file - cloud_name = file_path.split('/')[-1] + cloud_name = file_path.split("/")[-1] val_name = join(val_path, cloud_name) # Save file labels = val_loader.dataset.validation_labels[i].astype(np.int32) - write_ply(val_name, - [points, preds, labels], - ['x', 'y', 'z', 'preds', 'class']) + write_ply( + val_name, [points, preds, labels], ["x", "y", "z", "preds", "class"] + ) # Display timings t7 = time.time() if debug: - print('\n************************\n') - print('Validation timings:') - print('Init ...... {:.1f}s'.format(t1 - t0)) - print('Loop ...... 
{:.1f}s'.format(t2 - t1)) - print('Confs ..... {:.1f}s'.format(t3 - t2)) - print('Confs bis . {:.1f}s'.format(t4 - t3)) - print('IoU ....... {:.1f}s'.format(t5 - t4)) - print('Save1 ..... {:.1f}s'.format(t6 - t5)) - print('Save2 ..... {:.1f}s'.format(t7 - t6)) - print('\n************************\n') + print("\n************************\n") + print("Validation timings:") + print("Init ...... {:.1f}s".format(t1 - t0)) + print("Loop ...... {:.1f}s".format(t2 - t1)) + print("Confs ..... {:.1f}s".format(t3 - t2)) + print("Confs bis . {:.1f}s".format(t4 - t3)) + print("IoU ....... {:.1f}s".format(t5 - t4)) + print("Save1 ..... {:.1f}s".format(t6 - t5)) + print("Save2 ..... {:.1f}s".format(t7 - t6)) + print("\n************************\n") return @@ -669,12 +692,11 @@ class ModelTrainer: return # Choose validation smoothing parameter (0 for no smothing, 0.99 for big smoothing) - val_smooth = 0.95 softmax = torch.nn.Softmax(1) # Create folder for validation predictions - if not exists (join(config.saving_path, 'val_preds')): - makedirs(join(config.saving_path, 'val_preds')) + if not exists(join(config.saving_path, "val_preds")): + makedirs(join(config.saving_path, "val_preds")) # initiate the dataset validation containers val_loader.dataset.val_points = [] @@ -696,17 +718,15 @@ class ModelTrainer: last_display = time.time() mean_dt = np.zeros(1) - t1 = time.time() # Start validation loop for i, batch in enumerate(val_loader): - # New time t = t[-1:] t += [time.time()] - if 'cuda' in self.device.type: + if "cuda" in self.device.type: batch.to(self.device) # Forward pass @@ -726,9 +746,8 @@ class ModelTrainer: i0 = 0 for b_i, length in enumerate(lengths): - # Get prediction - probs = stk_probs[i0:i0 + length] + probs = stk_probs[i0 : i0 + length] proj_inds = r_inds_list[b_i] proj_mask = r_mask_list[b_i] frame_labels = labels_list[b_i] @@ -751,8 +770,10 @@ class ModelTrainer: preds = val_loader.dataset.label_values[np.argmax(proj_probs, axis=1)] # Save predictions in a binary file - filename = '{:s}_{:07d}.npy'.format(val_loader.dataset.sequences[s_ind], f_ind) - filepath = join(config.saving_path, 'val_preds', filename) + filename = "{:s}_{:07d}.npy".format( + val_loader.dataset.sequences[s_ind], f_ind + ) + filepath = join(config.saving_path, "val_preds", filename) if exists(filepath): frame_preds = np.load(filepath) else: @@ -762,18 +783,30 @@ class ModelTrainer: # Save some of the frame pots if f_ind % 20 == 0: - seq_path = join(val_loader.dataset.path, 'sequences', val_loader.dataset.sequences[s_ind]) - velo_file = join(seq_path, 'velodyne', val_loader.dataset.frames[s_ind][f_ind] + '.bin') + seq_path = join( + val_loader.dataset.path, + "sequences", + val_loader.dataset.sequences[s_ind], + ) + velo_file = join( + seq_path, + "velodyne", + val_loader.dataset.frames[s_ind][f_ind] + ".bin", + ) frame_points = np.fromfile(velo_file, dtype=np.float32) frame_points = frame_points.reshape((-1, 4)) - write_ply(filepath[:-4] + '_pots.ply', - [frame_points[:, :3], frame_labels, frame_preds], - ['x', 'y', 'z', 'gt', 'pre']) + write_ply( + filepath[:-4] + "_pots.ply", + [frame_points[:, :3], frame_labels, frame_preds], + ["x", "y", "z", "gt", "pre"], + ) # Update validation confusions - frame_C = fast_confusion(frame_labels, - frame_preds.astype(np.int32), - val_loader.dataset.label_values) + frame_C = fast_confusion( + frame_labels, + frame_preds.astype(np.int32), + val_loader.dataset.label_values, + ) val_loader.dataset.val_confs[s_ind][f_ind, :, :] = frame_C # Stack all prediction for this epoch @@ 
-790,19 +823,24 @@ class ModelTrainer: # Display if (t[-1] - last_display) > 1.0: last_display = t[-1] - message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})' - print(message.format(100 * i / config.validation_size, - 1000 * (mean_dt[0]), - 1000 * (mean_dt[1]))) + message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})" + print( + message.format( + 100 * i / config.validation_size, + 1000 * (mean_dt[0]), + 1000 * (mean_dt[1]), + ) + ) t2 = time.time() # Confusions for our subparts of validation set Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32) for i, (preds, truth) in enumerate(zip(predictions, targets)): - # Confusions - Confs[i, :, :] = fast_confusion(truth, preds, val_loader.dataset.label_values).astype(np.int32) + Confs[i, :, :] = fast_confusion( + truth, preds, val_loader.dataset.label_values + ).astype(np.int32) t3 = time.time() @@ -814,10 +852,14 @@ class ModelTrainer: C = np.sum(Confs, axis=0).astype(np.float32) # Balance with real validation proportions - C *= np.expand_dims(val_loader.dataset.class_proportions / (np.sum(C, axis=1) + 1e-6), 1) + C *= np.expand_dims( + val_loader.dataset.class_proportions / (np.sum(C, axis=1) + 1e-6), 1 + ) # Remove ignored labels from confusions - for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))): + for l_ind, label_value in reversed( + list(enumerate(val_loader.dataset.label_values)) + ): if label_value in val_loader.dataset.ignored_labels: C = np.delete(C, l_ind, axis=0) C = np.delete(C, l_ind, axis=1) @@ -832,19 +874,25 @@ class ModelTrainer: t4 = time.time() # Sum all validation confusions - C_tot = [np.sum(seq_C, axis=0) for seq_C in val_loader.dataset.val_confs if len(seq_C) > 0] + C_tot = [ + np.sum(seq_C, axis=0) + for seq_C in val_loader.dataset.val_confs + if len(seq_C) > 0 + ] C_tot = np.sum(np.stack(C_tot, axis=0), axis=0) if debug: - s = '\n' + s = "\n" for cc in C_tot: for c in cc: - s += '{:8.1f} '.format(c) - s += '\n' + s += "{:8.1f} ".format(c) + s += "\n" print(s) # Remove ignored labels from confusions - for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))): + for l_ind, label_value in reversed( + list(enumerate(val_loader.dataset.label_values)) + ): if label_value in val_loader.dataset.ignored_labels: C_tot = np.delete(C_tot, l_ind, axis=0) C_tot = np.delete(C_tot, l_ind, axis=1) @@ -856,19 +904,17 @@ class ModelTrainer: # Saving (optionnal) if config.saving: - IoU_list = [IoUs, val_IoUs] - file_list = ['subpart_IoUs.txt', 'val_IoUs.txt'] + file_list = ["subpart_IoUs.txt", "val_IoUs.txt"] for IoUs_to_save, IoU_file in zip(IoU_list, file_list): - # Name of saving file test_file = join(config.saving_path, IoU_file) # Line to write: - line = '' + line = "" for IoU in IoUs_to_save: - line += '{:.3f} '.format(IoU) - line = line + '\n' + line += "{:.3f} ".format(IoU) + line = line + "\n" # Write in file if exists(test_file): @@ -880,57 +926,22 @@ class ModelTrainer: # Print instance mean mIoU = 100 * np.mean(IoUs) - print('{:s} : subpart mIoU = {:.1f} %'.format(config.dataset, mIoU)) + print("{:s} : subpart mIoU = {:.1f} %".format(config.dataset, mIoU)) mIoU = 100 * np.mean(val_IoUs) - print('{:s} : val mIoU = {:.1f} %'.format(config.dataset, mIoU)) + print("{:s} : val mIoU = {:.1f} %".format(config.dataset, mIoU)) t6 = time.time() # Display timings if debug: - print('\n************************\n') - print('Validation timings:') - print('Init ...... {:.1f}s'.format(t1 - t0)) - print('Loop ...... 
{:.1f}s'.format(t2 - t1)) - print('Confs ..... {:.1f}s'.format(t3 - t2)) - print('IoU1 ...... {:.1f}s'.format(t4 - t3)) - print('IoU2 ...... {:.1f}s'.format(t5 - t4)) - print('Save ...... {:.1f}s'.format(t6 - t5)) - print('\n************************\n') + print("\n************************\n") + print("Validation timings:") + print("Init ...... {:.1f}s".format(t1 - t0)) + print("Loop ...... {:.1f}s".format(t2 - t1)) + print("Confs ..... {:.1f}s".format(t3 - t2)) + print("IoU1 ...... {:.1f}s".format(t4 - t3)) + print("IoU2 ...... {:.1f}s".format(t5 - t4)) + print("Save ...... {:.1f}s".format(t6 - t5)) + print("\n************************\n") return - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/utils/visualizer.py b/utils/visualizer.py index cda24b6..2a158e8 100644 --- a/utils/visualizer.py +++ b/utils/visualizer.py @@ -26,19 +26,18 @@ import torch import numpy as np from sklearn.neighbors import KDTree -from os import makedirs, remove, rename, listdir -from os.path import exists, join +from os import listdir +from os.path import join import time from mayavi import mlab -import sys from models.blocks import KPConv # PLY reader -from utils.ply import write_ply, read_ply +from utils.ply import write_ply # Configuration class -from utils.config import Config, bcolors +from utils.config import bcolors # ---------------------------------------------------------------------------------------------------------------------- @@ -49,7 +48,6 @@ from utils.config import Config, bcolors class ModelVisualizer: - # Initialization methods # ------------------------------------------------------------------------------------------------------------------ @@ -81,13 +79,13 @@ class ModelVisualizer: checkpoint = torch.load(chkp_path) new_dict = {} - for k, v in checkpoint['model_state_dict'].items(): - if 'blocs' in k: - k = k.replace('blocs', 'blocks') + for k, v in checkpoint["model_state_dict"].items(): + if "blocs" in k: + k = k.replace("blocs", "blocks") new_dict[k] = v net.load_state_dict(new_dict) - self.epoch = checkpoint['epoch'] + self.epoch = checkpoint["epoch"] net.eval() print("\nModel state restored from {:s}.".format(chkp_path)) @@ -105,8 +103,10 @@ class ModelVisualizer: # First choose the visualized deformations ########################################## - print('\nList of the deformable convolution available (chosen one highlighted in green)') - fmt_str = ' {:}{:2d} > KPConv(r={:.3f}, Din={:d}, Dout={:d}){:}' + print( + "\nList of the deformable convolution available (chosen one highlighted in green)" + ) + fmt_str = " {:}{:2d} > KPConv(r={:.3f}, Din={:d}, Dout={:d}){:}" deform_convs = [] for m in net.modules(): if isinstance(m, KPConv) and m.deformable: @@ -114,27 +114,34 @@ class ModelVisualizer: color = bcolors.OKGREEN else: color = bcolors.FAIL - print(fmt_str.format(color, len(deform_convs), m.radius, m.in_channels, m.out_channels, bcolors.ENDC)) + print( + fmt_str.format( + color, + len(deform_convs), + m.radius, + m.in_channels, + m.out_channels, + bcolors.ENDC, + ) + ) deform_convs.append(m) ################ # Initialization ################ - print('\n****************************************************\n') + print("\n****************************************************\n") # Loop variables - t0 = time.time() + time.time() t = [time.time()] - last_display = time.time() - mean_dt = np.zeros(1) + time.time() + np.zeros(1) count = 0 # Start training loop for epoch in range(config.max_epoch): - for batch in loader: - ################## # Processing batch 
################## @@ -143,16 +150,20 @@ class ModelVisualizer: t = t[-1:] t += [time.time()] - if 'cuda' in self.device.type: + if "cuda" in self.device.type: batch.to(self.device) # Forward pass - outputs = net(batch, config) - original_KP = deform_convs[deform_idx].kernel_points.cpu().detach().numpy() - stacked_deformed_KP = deform_convs[deform_idx].deformed_KP.cpu().detach().numpy() + net(batch, config) + original_KP = ( + deform_convs[deform_idx].kernel_points.cpu().detach().numpy() + ) + stacked_deformed_KP = ( + deform_convs[deform_idx].deformed_KP.cpu().detach().numpy() + ) count += batch.lengths[0].shape[0] - if 'cuda' in self.device.type: + if "cuda" in self.device.type: torch.cuda.synchronize(self.device) # Find layer @@ -171,17 +182,23 @@ class ModelVisualizer: lookuptrees = [] i0 = 0 for b_i, length in enumerate(batch.lengths[0]): - in_points.append(batch.points[0][i0:i0 + length].cpu().detach().numpy()) + in_points.append( + batch.points[0][i0 : i0 + length].cpu().detach().numpy() + ) if batch.features.shape[1] == 4: - in_colors.append(batch.features[i0:i0 + length, 1:].cpu().detach().numpy()) + in_colors.append( + batch.features[i0 : i0 + length, 1:].cpu().detach().numpy() + ) else: in_colors.append(None) i0 += length i0 = 0 for b_i, length in enumerate(batch.lengths[l]): - points.append(batch.points[l][i0:i0 + length].cpu().detach().numpy()) - deformed_KP.append(stacked_deformed_KP[i0:i0 + length]) + points.append( + batch.points[l][i0 : i0 + length].cpu().detach().numpy() + ) + deformed_KP.append(stacked_deformed_KP[i0 : i0 + length]) lookuptrees.append(KDTree(points[-1])) i0 += length @@ -190,7 +207,9 @@ class ModelVisualizer: ########################### # Create figure for features - fig1 = mlab.figure('Deformations', bgcolor=(1.0, 1.0, 1.0), size=(1280, 920)) + fig1 = mlab.figure( + "Deformations", bgcolor=(1.0, 1.0, 1.0), size=(1280, 920) + ) fig1.scene.parallel_projection = False # Indices @@ -204,26 +223,41 @@ class ModelVisualizer: aim_point = np.zeros((1, 3)) def picker_callback(picker): - """ Picker callback: this get called when on pick events. 
- """ + """Picker callback: this get called when on pick events.""" global plots, aim_point - if 'in_points' in plots: - if plots['in_points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]: - point_rez = plots['in_points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0] + if "in_points" in plots: + if plots["in_points"].actor.actor._vtk_obj in [ + o._vtk_obj for o in picker.actors + ]: + point_rez = ( + plots["in_points"] + .glyph.glyph_source.glyph_source.output.points.to_array() + .shape[0] + ) new_point_i = int(np.floor(picker.point_id / point_rez)) - if new_point_i < len(plots['in_points'].mlab_source.points): + if new_point_i < len(plots["in_points"].mlab_source.points): # Get closest point in the layer we are interested in - aim_point = plots['in_points'].mlab_source.points[new_point_i:new_point_i + 1] + aim_point = plots["in_points"].mlab_source.points[ + new_point_i : new_point_i + 1 + ] update_scene() - if 'points' in plots: - if plots['points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]: - point_rez = plots['points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0] + if "points" in plots: + if plots["points"].actor.actor._vtk_obj in [ + o._vtk_obj for o in picker.actors + ]: + point_rez = ( + plots["points"] + .glyph.glyph_source.glyph_source.output.points.to_array() + .shape[0] + ) new_point_i = int(np.floor(picker.point_id / point_rez)) - if new_point_i < len(plots['points'].mlab_source.points): + if new_point_i < len(plots["points"].mlab_source.points): # Get closest point in the layer we are interested in - aim_point = plots['points'].mlab_source.points[new_point_i:new_point_i + 1] + aim_point = plots["points"].mlab_source.points[ + new_point_i : new_point_i + 1 + ] update_scene() def update_scene(): @@ -243,61 +277,68 @@ class ModelVisualizer: p = points[obj_i] # Rescale points for visu - p = (p * 1.5 / config.in_radius) - + p = p * 1.5 / config.in_radius # Show point cloud if show_in_p <= 1: - plots['points'] = mlab.points3d(p[:, 0], - p[:, 1], - p[:, 2], - resolution=8, - scale_factor=p_scale, - scale_mode='none', - color=(0, 1, 1), - figure=fig1) + plots["points"] = mlab.points3d( + p[:, 0], + p[:, 1], + p[:, 2], + resolution=8, + scale_factor=p_scale, + scale_mode="none", + color=(0, 1, 1), + figure=fig1, + ) if show_in_p >= 1: - # Get points and colors in_p = in_points[obj_i] - in_p = (in_p * 1.5 / config.in_radius) + in_p = in_p * 1.5 / config.in_radius # Color point cloud if possible in_c = in_colors[obj_i] if in_c is not None: - # Primitives - scalars = np.arange(len(in_p)) # Key point: set an integer for each point + scalars = np.arange( + len(in_p) + ) # Key point: set an integer for each point # Define color table (including alpha), which must be uint8 and [0,255] colors = np.hstack((in_c, np.ones_like(in_c[:, :1]))) colors = (colors * 255).astype(np.uint8) - plots['in_points'] = mlab.points3d(in_p[:, 0], - in_p[:, 1], - in_p[:, 2], - scalars, - resolution=8, - scale_factor=p_scale*0.8, - scale_mode='none', - figure=fig1) - plots['in_points'].module_manager.scalar_lut_manager.lut.table = colors + plots["in_points"] = mlab.points3d( + in_p[:, 0], + in_p[:, 1], + in_p[:, 2], + scalars, + resolution=8, + scale_factor=p_scale * 0.8, + scale_mode="none", + figure=fig1, + ) + plots[ + "in_points" + ].module_manager.scalar_lut_manager.lut.table = colors else: - - plots['in_points'] = mlab.points3d(in_p[:, 0], - in_p[:, 1], - in_p[:, 2], - resolution=8, - scale_factor=p_scale*0.8, - scale_mode='none', - 
figure=fig1) - + plots["in_points"] = mlab.points3d( + in_p[:, 0], + in_p[:, 1], + in_p[:, 2], + resolution=8, + scale_factor=p_scale * 0.8, + scale_mode="none", + figure=fig1, + ) # Get KP locations rescaled_aim_point = aim_point * config.in_radius / 1.5 - point_i = lookuptrees[obj_i].query(rescaled_aim_point, return_distance=False)[0][0] + point_i = lookuptrees[obj_i].query( + rescaled_aim_point, return_distance=False + )[0][0] if offsets: KP = points[obj_i][point_i] + deformed_KP[obj_i][point_i] scals = np.ones_like(KP[:, 0]) @@ -305,35 +346,46 @@ class ModelVisualizer: KP = points[obj_i][point_i] + original_KP scals = np.zeros_like(KP[:, 0]) - KP = (KP * 1.5 / config.in_radius) - - plots['KP'] = mlab.points3d(KP[:, 0], - KP[:, 1], - KP[:, 2], - scals, - colormap='autumn', - resolution=8, - scale_factor=1.2*p_scale, - scale_mode='none', - vmin=0, - vmax=1, - figure=fig1) + KP = KP * 1.5 / config.in_radius + plots["KP"] = mlab.points3d( + KP[:, 0], + KP[:, 1], + KP[:, 2], + scals, + colormap="autumn", + resolution=8, + scale_factor=1.2 * p_scale, + scale_mode="none", + vmin=0, + vmax=1, + figure=fig1, + ) if True: - plots['center'] = mlab.points3d(p[point_i, 0], - p[point_i, 1], - p[point_i, 2], - scale_factor=1.1*p_scale, - scale_mode='none', - color=(0, 1, 0), - figure=fig1) + plots["center"] = mlab.points3d( + p[point_i, 0], + p[point_i, 1], + p[point_i, 2], + scale_factor=1.1 * p_scale, + scale_mode="none", + color=(0, 1, 0), + figure=fig1, + ) # New title - plots['title'] = mlab.title(str(obj_i), color=(0, 0, 0), size=0.3, height=0.01) - text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' - plots['text'] = mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) - plots['orient'] = mlab.orientation_axes() + plots["title"] = mlab.title( + str(obj_i), color=(0, 0, 0), size=0.3, height=0.01 + ) + text = ( + "<--- (press g for previous)" + + 50 * " " + + "(press h for next) --->" + ) + plots["text"] = mlab.text( + 0.01, 0.01, text, color=(0, 0, 0), width=0.98 + ) + plots["orient"] = mlab.orientation_axes() # Set the saved view mlab.view(*v) @@ -347,12 +399,10 @@ class ModelVisualizer: # Get KP locations KP_def = points[obj_i][point_i] + deformed_KP[obj_i][point_i] - KP_def = (KP_def * 1.5 / config.in_radius) - KP_def_color = (1, 0, 0) + KP_def = KP_def * 1.5 / config.in_radius KP_rigid = points[obj_i][point_i] + original_KP - KP_rigid = (KP_rigid * 1.5 / config.in_radius) - KP_rigid_color = (1, 0.7, 0) + KP_rigid = KP_rigid * 1.5 / config.in_radius if offsets: t_list = np.linspace(0, 1, 150, dtype=np.float32) @@ -362,10 +412,12 @@ class ModelVisualizer: @mlab.animate(delay=10) def anim(): for t in t_list: - plots['KP'].mlab_source.set(x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0], - y=t * KP_def[:, 1] + (1 - t) * KP_rigid[:, 1], - z=t * KP_def[:, 2] + (1 - t) * KP_rigid[:, 2], - scalars=t * np.ones_like(KP_def[:, 0])) + plots["KP"].mlab_source.set( + x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0], + y=t * KP_def[:, 1] + (1 - t) * KP_rigid[:, 1], + z=t * KP_def[:, 2] + (1 - t) * KP_rigid[:, 2], + scalars=t * np.ones_like(KP_def[:, 0]), + ) yield @@ -376,58 +428,63 @@ class ModelVisualizer: def keyboard_callback(vtk_obj, event): global obj_i, point_i, offsets, p_scale, show_in_p - if vtk_obj.GetKeyCode() in ['b', 'B']: + if vtk_obj.GetKeyCode() in ["b", "B"]: p_scale /= 1.5 update_scene() - elif vtk_obj.GetKeyCode() in ['n', 'N']: + elif vtk_obj.GetKeyCode() in ["n", "N"]: p_scale *= 1.5 update_scene() - if vtk_obj.GetKeyCode() in ['g', 'G']: + if 
vtk_obj.GetKeyCode() in ["g", "G"]: obj_i = (obj_i - 1) % len(deformed_KP) point_i = 0 update_scene() - elif vtk_obj.GetKeyCode() in ['h', 'H']: + elif vtk_obj.GetKeyCode() in ["h", "H"]: obj_i = (obj_i + 1) % len(deformed_KP) point_i = 0 update_scene() - elif vtk_obj.GetKeyCode() in ['k', 'K']: + elif vtk_obj.GetKeyCode() in ["k", "K"]: offsets = not offsets animate_kernel() - elif vtk_obj.GetKeyCode() in ['z', 'Z']: + elif vtk_obj.GetKeyCode() in ["z", "Z"]: show_in_p = (show_in_p + 1) % 3 update_scene() - elif vtk_obj.GetKeyCode() in ['0']: - - print('Saving') + elif vtk_obj.GetKeyCode() in ["0"]: + print("Saving") # Find a new name file_i = 0 - file_name = 'KP_{:03d}.ply'.format(file_i) - files = [f for f in listdir('KP_clouds') if f.endswith('.ply')] + file_name = "KP_{:03d}.ply".format(file_i) + files = [f for f in listdir("KP_clouds") if f.endswith(".ply")] while file_name in files: file_i += 1 - file_name = 'KP_{:03d}.ply'.format(file_i) + file_name = "KP_{:03d}.ply".format(file_i) KP_deform = points[obj_i][point_i] + deformed_KP[obj_i][point_i] KP_normal = points[obj_i][point_i] + original_KP # Save - write_ply(join('KP_clouds', file_name), - [in_points[obj_i], in_colors[obj_i]], - ['x', 'y', 'z', 'red', 'green', 'blue']) - write_ply(join('KP_clouds', 'KP_{:03d}_deform.ply'.format(file_i)), - [KP_deform], - ['x', 'y', 'z']) - write_ply(join('KP_clouds', 'KP_{:03d}_normal.ply'.format(file_i)), - [KP_normal], - ['x', 'y', 'z']) - print('OK') + write_ply( + join("KP_clouds", file_name), + [in_points[obj_i], in_colors[obj_i]], + ["x", "y", "z", "red", "green", "blue"], + ) + write_ply( + join("KP_clouds", "KP_{:03d}_deform.ply".format(file_i)), + [KP_deform], + ["x", "y", "z"], + ) + write_ply( + join("KP_clouds", "KP_{:03d}_normal.ply".format(file_i)), + [KP_normal], + ["x", "y", "z"], + ) + print("OK") return @@ -435,7 +492,7 @@ class ModelVisualizer: pick_func = fig1.on_mouse_pick(picker_callback) pick_func.tolerance = 0.01 update_scene() - fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) + fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback) mlab.show() return @@ -445,13 +502,12 @@ class ModelVisualizer: def show_ModelNet_models(all_points): - ########################### # Interactive visualization ########################### # Create figure for features - fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800)) + fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800)) fig1.scene.parallel_projection = False # Indices @@ -459,7 +515,6 @@ def show_ModelNet_models(all_points): file_i = 0 def update_scene(): - # clear figure mlab.clf(fig1) @@ -470,17 +525,19 @@ def show_ModelNet_models(all_points): points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0 # Show point clouds colorized with activations - activations = mlab.points3d(points[:, 0], - points[:, 1], - points[:, 2], - points[:, 2], - scale_factor=3.0, - scale_mode='none', - figure=fig1) + mlab.points3d( + points[:, 0], + points[:, 1], + points[:, 2], + points[:, 2], + scale_factor=3.0, + scale_mode="none", + figure=fig1, + ) # New title mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01) - text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->' + text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->" mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98) mlab.orientation_axes() @@ -489,13 +546,11 @@ def show_ModelNet_models(all_points): def keyboard_callback(vtk_obj, event): global file_i - if 
vtk_obj.GetKeyCode() in ['g', 'G']: - + if vtk_obj.GetKeyCode() in ["g", "G"]: file_i = (file_i - 1) % len(all_points) update_scene() - elif vtk_obj.GetKeyCode() in ['h', 'H']: - + elif vtk_obj.GetKeyCode() in ["h", "H"]: file_i = (file_i + 1) % len(all_points) update_scene() @@ -503,29 +558,5 @@ def show_ModelNet_models(all_points): # Draw a first plot update_scene() - fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback) + fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback) mlab.show() - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/visualize_deformations.py b/visualize_deformations.py index dd05e35..8ebf570 100644 --- a/visualize_deformations.py +++ b/visualize_deformations.py @@ -22,11 +22,8 @@ # # Common libs -import signal import os import numpy as np -import sys -import torch # Dataset from datasetss.ModelNet40 import * @@ -44,20 +41,25 @@ from models.architectures import KPCNN, KPFCNN # \***************/ # -def model_choice(chosen_log): +def model_choice(chosen_log): ########################### # Call the test initializer ########################### # Automatically retrieve the last trained model - if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']: - + if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]: # Dataset name - test_dataset = '_'.join(chosen_log.split('_')[1:]) + test_dataset = "_".join(chosen_log.split("_")[1:]) # List all training logs - logs = np.sort([os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log')]) + logs = np.sort( + [ + os.path.join("results", f) + for f in os.listdir("results") + if f.startswith("Log") + ] + ) # Find the last log of asked dataset for log in logs[::-1]: @@ -67,12 +69,12 @@ def model_choice(chosen_log): chosen_log = log break - if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']: + if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]: raise ValueError('No log of the dataset "' + test_dataset + '" found') # Check if log exists if not os.path.exists(chosen_log): - raise ValueError('The given log does not exists: ' + chosen_log) + raise ValueError("The given log does not exists: " + chosen_log) return chosen_log @@ -83,8 +85,7 @@ def model_choice(chosen_log): # \***************/ # -if __name__ == '__main__': - +if __name__ == "__main__": ############################### # Choose the model to visualize ############################### @@ -94,7 +95,7 @@ if __name__ == '__main__': # > 'last_XXX': Automatically retrieve the last trained model on dataset XXX # > 'results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model - chosen_log = 'results/Log_2020-04-23_19-42-18' + chosen_log = "results/Log_2020-04-23_19-42-18" # Choose the index of the checkpoint to load OR None if you want to load the current checkpoint chkp_idx = None @@ -110,25 +111,25 @@ if __name__ == '__main__': ############################ # Set which gpu is going to be used - GPU_ID = '0' + GPU_ID = "0" # Set GPU visible device - os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID + os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID ############### # Previous chkp ############### # Find all checkpoints in the chosen training folder - chkp_path = os.path.join(chosen_log, 'checkpoints') - chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp'] + chkp_path = os.path.join(chosen_log, "checkpoints") + chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"] # Find which snapshot to restore if chkp_idx is None: - 
chosen_chkp = 'current_chkp.tar' + chosen_chkp = "current_chkp.tar" else: chosen_chkp = np.sort(chkps)[chkp_idx] - chosen_chkp = os.path.join(chosen_log, 'checkpoints', chosen_chkp) + chosen_chkp = os.path.join(chosen_log, "checkpoints", chosen_chkp) # Initialize configuration class config = Config() @@ -150,53 +151,54 @@ if __name__ == '__main__': ############## print() - print('Data Preparation') - print('****************') + print("Data Preparation") + print("****************") # Initiate dataset - if config.dataset.startswith('ModelNet40'): + if config.dataset.startswith("ModelNet40"): test_dataset = ModelNet40Dataset(config, train=False) test_sampler = ModelNet40Sampler(test_dataset) collate_fn = ModelNet40Collate - elif config.dataset == 'S3DIS': - test_dataset = S3DISDataset(config, set='validation', use_potentials=True) + elif config.dataset == "S3DIS": + test_dataset = S3DISDataset(config, set="validation", use_potentials=True) test_sampler = S3DISSampler(test_dataset) collate_fn = S3DISCollate else: - raise ValueError('Unsupported dataset : ' + config.dataset) + raise ValueError("Unsupported dataset : " + config.dataset) # Data loader - test_loader = DataLoader(test_dataset, - batch_size=1, - sampler=test_sampler, - collate_fn=collate_fn, - num_workers=config.input_threads, - pin_memory=True) + test_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=test_sampler, + collate_fn=collate_fn, + num_workers=config.input_threads, + pin_memory=True, + ) # Calibrate samplers test_sampler.calibration(test_loader, verbose=True) - print('\nModel Preparation') - print('*****************') + print("\nModel Preparation") + print("*****************") # Define network model t1 = time.time() - if config.dataset_task == 'classification': + if config.dataset_task == "classification": net = KPCNN(config) - elif config.dataset_task in ['cloud_segmentation', 'slam_segmentation']: + elif config.dataset_task in ["cloud_segmentation", "slam_segmentation"]: net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels) else: - raise ValueError('Unsupported dataset_task for deformation visu: ' + config.dataset_task) + raise ValueError( + "Unsupported dataset_task for deformation visu: " + config.dataset_task + ) # Define a visualizer class visualizer = ModelVisualizer(net, config, chkp_path=chosen_chkp, on_gpu=False) - print('Done in {:.1f}s\n'.format(time.time() - t1)) + print("Done in {:.1f}s\n".format(time.time() - t1)) - print('\nStart visualization') - print('*******************') + print("\nStart visualization") + print("*******************") # Training visualizer.show_deformable_kernels(net, test_loader, config, deform_idx) - - -
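
Note on the tester hunks above: the core of the cloud/SLAM segmentation test loops is a probability-voting update that blends each new network prediction into a uint8 buffer kept on disk. Below is a minimal standalone sketch of that update; the function name update_saved_probs and its argument names are illustrative only, and the 0.95 smoothing value is an assumption rather than the repository's exact setting.

import os
import numpy as np

def update_saved_probs(filepath, new_probs, proj_mask, n_points, n_classes, test_smooth=0.95):
    """Blend fresh per-point class probabilities (shape: mask_count x n_classes)
    into a running average stored on disk as uint8."""
    # Load the running average (uint8 values in [0, 255]) or start from zeros.
    if os.path.exists(filepath):
        probs_uint8 = np.load(filepath)
    else:
        probs_uint8 = np.zeros((n_points, n_classes), dtype=np.uint8)

    # Convert the masked rows back to float probabilities in [0, 1].
    old_probs = probs_uint8[proj_mask, :].astype(np.float32) / 255

    # Exponential moving average: previous votes keep most of the weight.
    blended = test_smooth * old_probs + (1 - test_smooth) * new_probs

    # Re-quantize and save for the next test epoch.
    probs_uint8[proj_mask, :] = (blended * 255).astype(np.uint8)
    np.save(filepath, probs_uint8)
    return probs_uint8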
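Note on the trainer hunks above: ModelTrainer splits the network parameters into two optimizer groups so that deformable-offset parameters train with a scaled-down learning rate, and later multiplies each group's lr by a per-epoch decay factor. A self-contained sketch of that pattern, using a toy module and made-up hyperparameters:

import torch

class ToyNet(torch.nn.Module):
    """Stand-in for the real KPConv network; the second layer's name contains "offset"."""
    def __init__(self):
        super().__init__()
        self.head = torch.nn.Linear(4, 2)
        self.offset_head = torch.nn.Linear(4, 3)

net = ToyNet()
base_lr, deform_lr_factor = 1e-2, 0.1   # hypothetical hyperparameters

# Parameters whose name contains "offset" get their own, smaller learning rate.
deform_params = [v for k, v in net.named_parameters() if "offset" in k]
other_params = [v for k, v in net.named_parameters() if "offset" not in k]

optimizer = torch.optim.SGD(
    [{"params": other_params}, {"params": deform_params, "lr": base_lr * deform_lr_factor}],
    lr=base_lr,
    momentum=0.98,
    weight_decay=1e-3,
)

# Per-epoch decay, as done at the end of each epoch in the training loop:
lr_decays = {20: 0.5, 40: 0.5}          # hypothetical schedule
epoch = 20
if epoch in lr_decays:
    for param_group in optimizer.param_groups:
        param_group["lr"] *= lr_decays[epoch]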
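Note on the validation metrics above: ignored labels are deleted from the confusion matrix (iterating in reverse so indices stay valid) before IoUs are computed with the repo's fast_confusion and IoU_from_confusions helpers. A pure-NumPy sketch of the same post-processing, with a hypothetical label set and hand-written confusion and IoU steps standing in for those helpers:

import numpy as np

label_values = np.array([0, 1, 2, 3])   # hypothetical label set
ignored_labels = np.array([0])          # e.g. an "unclassified" label

truth = np.array([1, 1, 2, 3, 3, 3])
preds = np.array([1, 2, 2, 3, 3, 1])

# Confusion matrix over all label values (rows = ground truth, cols = prediction).
n = len(label_values)
C = np.zeros((n, n), dtype=np.int64)
for t, p in zip(truth, preds):
    C[np.searchsorted(label_values, t), np.searchsorted(label_values, p)] += 1

# Drop ignored labels, iterating in reverse so the remaining indices stay valid.
for l_ind, label_value in reversed(list(enumerate(label_values))):
    if label_value in ignored_labels:
        C = np.delete(C, l_ind, axis=0)
        C = np.delete(C, l_ind, axis=1)

# Per-class IoU = TP / (TP + FP + FN), then mean IoU.
TP = np.diag(C).astype(np.float32)
FP = C.sum(axis=0) - TP
FN = C.sum(axis=1) - TP
IoUs = TP / (TP + FP + FN + 1e-6)
print("mIoU = {:5.2f}".format(100 * IoUs.mean()))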
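Note on checkpointing in the trainer hunks: a dict with the epoch, model state and optimizer state is saved each epoch, and restoring either loads everything (resume) or only the weights (finetuning). A minimal sketch of both sides, leaving out the saving_path entry the repo also stores:

import torch

def save_checkpoint(net, optimizer, epoch, checkpoint_path):
    """Save the current training state to a .tar file."""
    save_dict = {
        "epoch": epoch,
        "model_state_dict": net.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
    }
    torch.save(save_dict, checkpoint_path)

def load_checkpoint(net, optimizer, chkp_path, finetune=False):
    """Restore weights, and optionally the optimizer state and epoch counter."""
    checkpoint = torch.load(chkp_path)
    net.load_state_dict(checkpoint["model_state_dict"])
    if finetune:
        epoch = 0                     # start a fresh schedule with restored weights
    else:
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        epoch = checkpoint["epoch"]
    net.train()
    return epoch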