Compare commits

No commits in common. "6e682482d25e4df677fed6548c2f18c38eb3f097" and "d0ac8183662c7ff617145d818df72a191b0508e7" have entirely different histories.

6e682482d2 ... d0ac818366
.gitignore (vendored): 1 changed line

@@ -1,4 +1,3 @@
 Data/
 **/build
 **/desktop.ini
LICENSE.txt: 21 changed lines

@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2019 HuguesTHOMAS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
@@ -1,5 +1,5 @@
 
-![Intro figure](./doc/Github_intro.png)
+![Intro figure](https://github.com/HuguesTHOMAS/KPConv-PyTorch/blob/master/doc/Github_intro.png)
 
 Created by Hugues THOMAS
@@ -7,20 +7,22 @@ import numpy.distutils.misc_util
 # Adding sources of the project
 # *****************************
 
-SOURCES = [
-    "../cpp_utils/cloud/cloud.cpp",
-    "neighbors/neighbors.cpp",
-    "wrapper.cpp",
-]
+SOURCES = ["../cpp_utils/cloud/cloud.cpp",
+           "neighbors/neighbors.cpp",
+           "wrapper.cpp"]
 
-module = Extension(
-    name="radius_neighbors",
-    sources=SOURCES,
-    extra_compile_args=["-std=c++11", "-D_GLIBCXX_USE_CXX11_ABI=0"],
-)
+module = Extension(name="radius_neighbors",
+                   sources=SOURCES,
+                   extra_compile_args=['-std=c++11',
+                                       '-D_GLIBCXX_USE_CXX11_ABI=0'])
 
-setup(
-    ext_modules=[module],
-    include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs(),
-)
+
+setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
@@ -7,20 +7,22 @@ import numpy.distutils.misc_util
 # Adding sources of the project
 # *****************************
 
-SOURCES = [
-    "../cpp_utils/cloud/cloud.cpp",
-    "grid_subsampling/grid_subsampling.cpp",
-    "wrapper.cpp",
-]
+SOURCES = ["../cpp_utils/cloud/cloud.cpp",
+           "grid_subsampling/grid_subsampling.cpp",
+           "wrapper.cpp"]
 
-module = Extension(
-    name="grid_subsampling",
-    sources=SOURCES,
-    extra_compile_args=["-std=c++11", "-D_GLIBCXX_USE_CXX11_ABI=0"],
-)
+module = Extension(name="grid_subsampling",
+                   sources=SOURCES,
+                   extra_compile_args=['-std=c++11',
+                                       '-D_GLIBCXX_USE_CXX11_ABI=0'])
 
-setup(
-    ext_modules=[module],
-    include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs(),
-)
+
+setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
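The two setup.py diffs above only change formatting; the extensions they build, `radius_neighbors` and `grid_subsampling`, are the ones the dataset code in this compare imports and calls. As a rough sketch of how the compiled modules are consumed (the import path of the subsampling module and the integer dtype of the batch-length arrays are assumptions, not shown verbatim in this diff):

```python
import numpy as np

# Compiled extension modules, imported the way datasets/common.py does in this diff.
# The exact package path of the subsampling module is an assumption.
import cpp_wrappers.cpp_neighbors.radius_neighbors as cpp_neighbors
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling

points = np.random.rand(10000, 3).astype(np.float32)    # (N, 3) input cloud
features = np.random.rand(10000, 3).astype(np.float32)  # e.g. normals

# Barycenter grid subsampling with a 2 cm cell; with `features` given, the
# wrapper returns the subsampled points and the averaged features.
sub_points, sub_features = cpp_subsampling.subsample(points,
                                                     features=features,
                                                     sampleDl=0.02,
                                                     verbose=0)

# Radius neighbors between two stacked batches (single-element batch here).
# int32 lengths are an assumption about the expected dtype.
q_batches = np.array([sub_points.shape[0]], dtype=np.int32)
s_batches = np.array([points.shape[0]], dtype=np.int32)
neighb_inds = cpp_neighbors.batch_query(sub_points, points, q_batches, s_batches, radius=0.05)
```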
@@ -27,17 +27,19 @@ import time
 import numpy as np
 import pickle
 import torch
 import math
 
 
 # OS functions
 from os import listdir
 from os.path import exists, join
 
 # Dataset parent class
-from datasetss.common import PointCloudDataset
+from datasets.common import PointCloudDataset
 from torch.utils.data import Sampler, get_worker_info
 from utils.mayavi_visu import *
 
-from datasetss.common import grid_subsampling
+from datasets.common import grid_subsampling
 from utils.config import bcolors
 
 # ----------------------------------------------------------------------------------------------------------------------
@ -53,55 +55,53 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
"""
|
||||
This dataset is small enough to be stored in-memory, so load all point clouds here
|
||||
"""
|
||||
PointCloudDataset.__init__(self, "ModelNet40")
|
||||
PointCloudDataset.__init__(self, 'ModelNet40')
|
||||
|
||||
############
|
||||
# Parameters
|
||||
############
|
||||
|
||||
# Dict from labels to names
|
||||
self.label_to_names = {
|
||||
0: "airplane",
|
||||
1: "bathtub",
|
||||
2: "bed",
|
||||
3: "bench",
|
||||
4: "bookshelf",
|
||||
5: "bottle",
|
||||
6: "bowl",
|
||||
7: "car",
|
||||
8: "chair",
|
||||
9: "cone",
|
||||
10: "cup",
|
||||
11: "curtain",
|
||||
12: "desk",
|
||||
13: "door",
|
||||
14: "dresser",
|
||||
15: "flower_pot",
|
||||
16: "glass_box",
|
||||
17: "guitar",
|
||||
18: "keyboard",
|
||||
19: "lamp",
|
||||
20: "laptop",
|
||||
21: "mantel",
|
||||
22: "monitor",
|
||||
23: "night_stand",
|
||||
24: "person",
|
||||
25: "piano",
|
||||
26: "plant",
|
||||
27: "radio",
|
||||
28: "range_hood",
|
||||
29: "sink",
|
||||
30: "sofa",
|
||||
31: "stairs",
|
||||
32: "stool",
|
||||
33: "table",
|
||||
34: "tent",
|
||||
35: "toilet",
|
||||
36: "tv_stand",
|
||||
37: "vase",
|
||||
38: "wardrobe",
|
||||
39: "xbox",
|
||||
}
|
||||
self.label_to_names = {0: 'airplane',
|
||||
1: 'bathtub',
|
||||
2: 'bed',
|
||||
3: 'bench',
|
||||
4: 'bookshelf',
|
||||
5: 'bottle',
|
||||
6: 'bowl',
|
||||
7: 'car',
|
||||
8: 'chair',
|
||||
9: 'cone',
|
||||
10: 'cup',
|
||||
11: 'curtain',
|
||||
12: 'desk',
|
||||
13: 'door',
|
||||
14: 'dresser',
|
||||
15: 'flower_pot',
|
||||
16: 'glass_box',
|
||||
17: 'guitar',
|
||||
18: 'keyboard',
|
||||
19: 'lamp',
|
||||
20: 'laptop',
|
||||
21: 'mantel',
|
||||
22: 'monitor',
|
||||
23: 'night_stand',
|
||||
24: 'person',
|
||||
25: 'piano',
|
||||
26: 'plant',
|
||||
27: 'radio',
|
||||
28: 'range_hood',
|
||||
29: 'sink',
|
||||
30: 'sofa',
|
||||
31: 'stairs',
|
||||
32: 'stool',
|
||||
33: 'table',
|
||||
34: 'tent',
|
||||
35: 'toilet',
|
||||
36: 'tv_stand',
|
||||
37: 'vase',
|
||||
38: 'wardrobe',
|
||||
39: 'xbox'}
|
||||
|
||||
# Initialize a bunch of variables concerning class labels
|
||||
self.init_labels()
|
||||
|
@@ -110,10 +110,10 @@ class ModelNet40Dataset(PointCloudDataset):
         self.ignored_labels = np.array([])
 
         # Dataset folder
-        self.path = "./Data/ModelNet40"
+        self.path = '../../Data/ModelNet40'
 
         # Type of task conducted on this dataset
-        self.dataset_task = "classification"
+        self.dataset_task = 'classification'
 
         # Update number of class and data task in configuration
         config.num_classes = self.num_classes
@ -128,31 +128,22 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
# Number of models and models used per epoch
|
||||
if self.train:
|
||||
self.num_models = 9843
|
||||
if (
|
||||
config.epoch_steps
|
||||
and config.epoch_steps * config.batch_num < self.num_models
|
||||
):
|
||||
if config.epoch_steps and config.epoch_steps * config.batch_num < self.num_models:
|
||||
self.epoch_n = config.epoch_steps * config.batch_num
|
||||
else:
|
||||
self.epoch_n = self.num_models
|
||||
else:
|
||||
self.num_models = 2468
|
||||
self.epoch_n = min(
|
||||
self.num_models, config.validation_size * config.batch_num
|
||||
)
|
||||
self.epoch_n = min(self.num_models, config.validation_size * config.batch_num)
|
||||
|
||||
#############
|
||||
# Load models
|
||||
#############
|
||||
|
||||
if 0 < self.config.first_subsampling_dl <= 0.01:
|
||||
raise ValueError("subsampling_parameter too low (should be over 1 cm")
|
||||
raise ValueError('subsampling_parameter too low (should be over 1 cm')
|
||||
|
||||
(
|
||||
self.input_points,
|
||||
self.input_normals,
|
||||
self.input_labels,
|
||||
) = self.load_subsampled_clouds(orient_correction)
|
||||
self.input_points, self.input_normals, self.input_labels = self.load_subsampled_clouds(orient_correction)
|
||||
|
||||
return
|
||||
|
||||
|
@ -180,6 +171,7 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
R_list = []
|
||||
|
||||
for p_i in idx_list:
|
||||
|
||||
# Get points and labels
|
||||
points = self.input_points[p_i].astype(np.float32)
|
||||
normals = self.input_normals[p_i].astype(np.float32)
|
||||
|
@ -200,7 +192,7 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
# Concatenate batch
|
||||
###################
|
||||
|
||||
# show_ModelNet_examples(tp_list, cloud_normals=tn_list)
|
||||
#show_ModelNet_examples(tp_list, cloud_normals=tn_list)
|
||||
|
||||
stacked_points = np.concatenate(tp_list, axis=0)
|
||||
stacked_normals = np.concatenate(tn_list, axis=0)
|
||||
|
@ -217,9 +209,7 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
elif self.config.in_features_dim == 4:
|
||||
stacked_features = np.hstack((stacked_features, stacked_normals))
|
||||
else:
|
||||
raise ValueError(
|
||||
"Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)"
|
||||
)
|
||||
raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)')
|
||||
|
||||
#######################
|
||||
# Create network inputs
|
||||
|
@ -229,9 +219,10 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
#
|
||||
|
||||
# Get the whole input list
|
||||
input_list = self.classification_inputs(
|
||||
stacked_points, stacked_features, labels, stack_lengths
|
||||
)
|
||||
input_list = self.classification_inputs(stacked_points,
|
||||
stacked_features,
|
||||
labels,
|
||||
stack_lengths)
|
||||
|
||||
# Add scale and rotation for testing
|
||||
input_list += [scales, rots, model_inds]
|
||||
|
@ -239,38 +230,31 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
return input_list
|
||||
|
||||
def load_subsampled_clouds(self, orient_correction):
|
||||
|
||||
# Restart timer
|
||||
t0 = time.time()
|
||||
|
||||
# Load wanted points if possible
|
||||
if self.train:
|
||||
split = "training"
|
||||
split ='training'
|
||||
else:
|
||||
split = "test"
|
||||
split = 'test'
|
||||
|
||||
print(
|
||||
"\nLoading {:s} points subsampled at {:.3f}".format(
|
||||
split, self.config.first_subsampling_dl
|
||||
)
|
||||
)
|
||||
filename = join(
|
||||
self.path,
|
||||
"{:s}_{:.3f}_record.pkl".format(split, self.config.first_subsampling_dl),
|
||||
)
|
||||
print('\nLoading {:s} points subsampled at {:.3f}'.format(split, self.config.first_subsampling_dl))
|
||||
filename = join(self.path, '{:s}_{:.3f}_record.pkl'.format(split, self.config.first_subsampling_dl))
|
||||
|
||||
if exists(filename):
|
||||
with open(filename, "rb") as file:
|
||||
with open(filename, 'rb') as file:
|
||||
input_points, input_normals, input_labels = pickle.load(file)
|
||||
|
||||
# Else compute them from original points
|
||||
else:
|
||||
|
||||
# Collect training file names
|
||||
if self.train:
|
||||
names = np.loadtxt(
|
||||
join(self.path, "modelnet40_train.txt"), dtype=np.str
|
||||
)
|
||||
names = np.loadtxt(join(self.path, 'modelnet40_train.txt'), dtype=np.str)
|
||||
else:
|
||||
names = np.loadtxt(join(self.path, "modelnet40_test.txt"), dtype=np.str)
|
||||
names = np.loadtxt(join(self.path, 'modelnet40_test.txt'), dtype=np.str)
|
||||
|
||||
# Initialize containers
|
||||
input_points = []
|
||||
|
@ -279,54 +263,49 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
# Advanced display
|
||||
N = len(names)
|
||||
progress_n = 30
|
||||
fmt_str = "[{:<" + str(progress_n) + "}] {:5.1f}%"
|
||||
fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'
|
||||
|
||||
# Collect point clouds
|
||||
for i, cloud_name in enumerate(names):
|
||||
|
||||
# Read points
|
||||
class_folder = "_".join(cloud_name.split("_")[:-1])
|
||||
txt_file = join(self.path, class_folder, cloud_name) + ".txt"
|
||||
data = np.loadtxt(txt_file, delimiter=",", dtype=np.float32)
|
||||
class_folder = '_'.join(cloud_name.split('_')[:-1])
|
||||
txt_file = join(self.path, class_folder, cloud_name) + '.txt'
|
||||
data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32)
|
||||
|
||||
# Subsample them
|
||||
if self.config.first_subsampling_dl > 0:
|
||||
points, normals = grid_subsampling(
|
||||
data[:, :3],
|
||||
points, normals = grid_subsampling(data[:, :3],
|
||||
features=data[:, 3:],
|
||||
sampleDl=self.config.first_subsampling_dl,
|
||||
)
|
||||
sampleDl=self.config.first_subsampling_dl)
|
||||
else:
|
||||
points = data[:, :3]
|
||||
normals = data[:, 3:]
|
||||
|
||||
print("", end="\r")
|
||||
print(
|
||||
fmt_str.format("#" * ((i * progress_n) // N), 100 * i / N),
|
||||
end="",
|
||||
flush=True,
|
||||
)
|
||||
print('', end='\r')
|
||||
print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True)
|
||||
|
||||
# Add to list
|
||||
input_points += [points]
|
||||
input_normals += [normals]
|
||||
|
||||
print("", end="\r")
|
||||
print(fmt_str.format("#" * progress_n, 100), end="", flush=True)
|
||||
print('', end='\r')
|
||||
print(fmt_str.format('#' * progress_n, 100), end='', flush=True)
|
||||
print()
|
||||
|
||||
# Get labels
|
||||
label_names = ["_".join(name.split("_")[:-1]) for name in names]
|
||||
label_names = ['_'.join(name.split('_')[:-1]) for name in names]
|
||||
input_labels = np.array([self.name_to_label[name] for name in label_names])
|
||||
|
||||
# Save for later use
|
||||
with open(filename, "wb") as file:
|
||||
pickle.dump((input_points, input_normals, input_labels), file)
|
||||
with open(filename, 'wb') as file:
|
||||
pickle.dump((input_points,
|
||||
input_normals,
|
||||
input_labels), file)
|
||||
|
||||
lengths = [p.shape[0] for p in input_points]
|
||||
sizes = [l * 4 * 6 for l in lengths]
|
||||
print(
|
||||
"{:.1f} MB loaded in {:.1f}s".format(np.sum(sizes) * 1e-6, time.time() - t0)
|
||||
)
|
||||
print('{:.1f} MB loaded in {:.1f}s'.format(np.sum(sizes) * 1e-6, time.time() - t0))
|
||||
|
||||
if orient_correction:
|
||||
input_points = [pp[:, [0, 2, 1]] for pp in input_points]
|
||||
|
@ -334,7 +313,6 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
|
||||
return input_points, input_normals, input_labels
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------
|
||||
#
|
||||
# Utility classes definition
|
||||
|
@ -344,9 +322,7 @@ class ModelNet40Dataset(PointCloudDataset):
|
|||
class ModelNet40Sampler(Sampler):
|
||||
"""Sampler for ModelNet40"""
|
||||
|
||||
def __init__(
|
||||
self, dataset: ModelNet40Dataset, use_potential=True, balance_labels=False
|
||||
):
|
||||
def __init__(self, dataset: ModelNet40Dataset, use_potential=True, balance_labels=False):
|
||||
Sampler.__init__(self, dataset)
|
||||
|
||||
# Does the sampler use potential for regular sampling
|
||||
|
@ -380,18 +356,18 @@ class ModelNet40Sampler(Sampler):
|
|||
|
||||
if self.use_potential:
|
||||
if self.balance_labels:
|
||||
|
||||
gen_indices = []
|
||||
pick_n = self.dataset.epoch_n // self.dataset.num_classes + 1
|
||||
for i, l in enumerate(self.dataset.label_values):
|
||||
|
||||
# Get the potentials of the objects of this class
|
||||
label_inds = np.where(np.equal(self.dataset.input_labels, l))[0]
|
||||
class_potentials = self.potentials[label_inds]
|
||||
|
||||
# Get the indices to generate thanks to potentials
|
||||
if pick_n < class_potentials.shape[0]:
|
||||
pick_indices = np.argpartition(class_potentials, pick_n)[
|
||||
:pick_n
|
||||
]
|
||||
pick_indices = np.argpartition(class_potentials, pick_n)[:pick_n]
|
||||
else:
|
||||
pick_indices = np.random.permutation(class_potentials.shape[0])
|
||||
class_indices = label_inds[pick_indices]
|
||||
|
@ -401,20 +377,17 @@ class ModelNet40Sampler(Sampler):
|
|||
gen_indices = np.random.permutation(np.hstack(gen_indices))
|
||||
|
||||
else:
|
||||
|
||||
# Get indices with the minimum potential
|
||||
if self.dataset.epoch_n < self.potentials.shape[0]:
|
||||
gen_indices = np.argpartition(
|
||||
self.potentials, self.dataset.epoch_n
|
||||
)[: self.dataset.epoch_n]
|
||||
gen_indices = np.argpartition(self.potentials, self.dataset.epoch_n)[:self.dataset.epoch_n]
|
||||
else:
|
||||
gen_indices = np.random.permutation(self.potentials.shape[0])
|
||||
gen_indices = np.random.permutation(gen_indices)
|
||||
|
||||
# Update potentials (Change the order for the next epoch)
|
||||
self.potentials[gen_indices] = np.ceil(self.potentials[gen_indices])
|
||||
self.potentials[gen_indices] += (
|
||||
np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1
|
||||
)
|
||||
self.potentials[gen_indices] += np.random.rand(gen_indices.shape[0]) * 0.1 + 0.1
|
||||
|
||||
else:
|
||||
if self.balance_labels:
|
||||
|
@ -426,9 +399,7 @@ class ModelNet40Sampler(Sampler):
|
|||
gen_indices += [rand_inds]
|
||||
gen_indices = np.random.permutation(np.hstack(gen_indices))
|
||||
else:
|
||||
gen_indices = np.random.permutation(self.dataset.num_models)[
|
||||
: self.dataset.epoch_n
|
||||
]
|
||||
gen_indices = np.random.permutation(self.dataset.num_models)[:self.dataset.epoch_n]
|
||||
|
||||
################
|
||||
# Generator loop
|
||||
|
@ -440,6 +411,7 @@ class ModelNet40Sampler(Sampler):
|
|||
|
||||
# Generator loop
|
||||
for p_i in gen_indices:
|
||||
|
||||
# Size of picked cloud
|
||||
n = self.dataset.input_points[p_i].shape[0]
|
||||
|
||||
|
@ -478,7 +450,7 @@ class ModelNet40Sampler(Sampler):
|
|||
# Previously saved calibration
|
||||
##############################
|
||||
|
||||
print("\nStarting Calibration (use verbose=True for more details)")
|
||||
print('\nStarting Calibration (use verbose=True for more details)')
|
||||
t0 = time.time()
|
||||
|
||||
redo = False
|
||||
|
@ -487,40 +459,39 @@ class ModelNet40Sampler(Sampler):
|
|||
# ***********
|
||||
|
||||
# Load batch_limit dictionary
|
||||
batch_lim_file = join(self.dataset.path, "batch_limits.pkl")
|
||||
batch_lim_file = join(self.dataset.path, 'batch_limits.pkl')
|
||||
if exists(batch_lim_file):
|
||||
with open(batch_lim_file, "rb") as file:
|
||||
with open(batch_lim_file, 'rb') as file:
|
||||
batch_lim_dict = pickle.load(file)
|
||||
else:
|
||||
batch_lim_dict = {}
|
||||
|
||||
# Check if the batch limit associated with current parameters exists
|
||||
key = "{:.3f}_{:d}".format(
|
||||
self.dataset.config.first_subsampling_dl, self.dataset.config.batch_num
|
||||
)
|
||||
key = '{:.3f}_{:d}'.format(self.dataset.config.first_subsampling_dl,
|
||||
self.dataset.config.batch_num)
|
||||
if key in batch_lim_dict:
|
||||
self.batch_limit = batch_lim_dict[key]
|
||||
else:
|
||||
redo = True
|
||||
|
||||
if verbose:
|
||||
print("\nPrevious calibration found:")
|
||||
print("Check batch limit dictionary")
|
||||
print('\nPrevious calibration found:')
|
||||
print('Check batch limit dictionary')
|
||||
if key in batch_lim_dict:
|
||||
color = bcolors.OKGREEN
|
||||
v = str(int(batch_lim_dict[key]))
|
||||
else:
|
||||
color = bcolors.FAIL
|
||||
v = "?"
|
||||
print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC))
|
||||
v = '?'
|
||||
print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC))
|
||||
|
||||
# Neighbors limit
|
||||
# ***************
|
||||
|
||||
# Load neighb_limits dictionary
|
||||
neighb_lim_file = join(self.dataset.path, "neighbors_limits.pkl")
|
||||
neighb_lim_file = join(self.dataset.path, 'neighbors_limits.pkl')
|
||||
if exists(neighb_lim_file):
|
||||
with open(neighb_lim_file, "rb") as file:
|
||||
with open(neighb_lim_file, 'rb') as file:
|
||||
neighb_lim_dict = pickle.load(file)
|
||||
else:
|
||||
neighb_lim_dict = {}
|
||||
|
@ -528,13 +499,14 @@ class ModelNet40Sampler(Sampler):
|
|||
# Check if the limit associated with current parameters exists (for each layer)
|
||||
neighb_limits = []
|
||||
for layer_ind in range(self.dataset.config.num_layers):
|
||||
|
||||
dl = self.dataset.config.first_subsampling_dl * (2**layer_ind)
|
||||
if self.dataset.config.deform_layers[layer_ind]:
|
||||
r = dl * self.dataset.config.deform_radius
|
||||
else:
|
||||
r = dl * self.dataset.config.conv_radius
|
||||
|
||||
key = "{:.3f}_{:.3f}".format(dl, r)
|
||||
key = '{:.3f}_{:.3f}'.format(dl, r)
|
||||
if key in neighb_lim_dict:
|
||||
neighb_limits += [neighb_lim_dict[key]]
|
||||
|
||||
|
@ -544,37 +516,34 @@ class ModelNet40Sampler(Sampler):
|
|||
redo = True
|
||||
|
||||
if verbose:
|
||||
print("Check neighbors limit dictionary")
|
||||
print('Check neighbors limit dictionary')
|
||||
for layer_ind in range(self.dataset.config.num_layers):
|
||||
dl = self.dataset.config.first_subsampling_dl * (2**layer_ind)
|
||||
if self.dataset.config.deform_layers[layer_ind]:
|
||||
r = dl * self.dataset.config.deform_radius
|
||||
else:
|
||||
r = dl * self.dataset.config.conv_radius
|
||||
key = "{:.3f}_{:.3f}".format(dl, r)
|
||||
key = '{:.3f}_{:.3f}'.format(dl, r)
|
||||
|
||||
if key in neighb_lim_dict:
|
||||
color = bcolors.OKGREEN
|
||||
v = str(neighb_lim_dict[key])
|
||||
else:
|
||||
color = bcolors.FAIL
|
||||
v = "?"
|
||||
print('{:}"{:s}": {:s}{:}'.format(color, key, v, bcolors.ENDC))
|
||||
v = '?'
|
||||
print('{:}\"{:s}\": {:s}{:}'.format(color, key, v, bcolors.ENDC))
|
||||
|
||||
if redo:
|
||||
|
||||
############################
|
||||
# Neighbors calib parameters
|
||||
############################
|
||||
|
||||
# From config parameter, compute higher bound of neighbors number in a neighborhood
|
||||
hist_n = int(
|
||||
np.ceil(4 / 3 * np.pi * (self.dataset.config.conv_radius + 1) ** 3)
|
||||
)
|
||||
hist_n = int(np.ceil(4 / 3 * np.pi * (self.dataset.config.conv_radius + 1) ** 3))
|
||||
|
||||
# Histogram of neighborhood sizes
|
||||
neighb_hists = np.zeros(
|
||||
(self.dataset.config.num_layers, hist_n), dtype=np.int32
|
||||
)
|
||||
neighb_hists = np.zeros((self.dataset.config.num_layers, hist_n), dtype=np.int32)
|
||||
|
||||
########################
|
||||
# Batch calib parameters
|
||||
|
@ -604,11 +573,9 @@ class ModelNet40Sampler(Sampler):
|
|||
|
||||
for epoch in range(10):
|
||||
for batch_i, batch in enumerate(dataloader):
|
||||
|
||||
# Update neighborhood histogram
|
||||
counts = [
|
||||
np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1)
|
||||
for neighb_mat in batch.neighbors
|
||||
]
|
||||
counts = [np.sum(neighb_mat.numpy() < neighb_mat.shape[0], axis=1) for neighb_mat in batch.neighbors]
|
||||
hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts]
|
||||
neighb_hists += np.vstack(hists)
|
||||
|
||||
|
@ -645,68 +612,69 @@ class ModelNet40Sampler(Sampler):
|
|||
# Console display (only one per second)
|
||||
if verbose and (t - last_display) > 1.0:
|
||||
last_display = t
|
||||
message = "Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}"
|
||||
print(message.format(i, estim_b, int(self.batch_limit)))
|
||||
message = 'Step {:5d} estim_b ={:5.2f} batch_limit ={:7d}'
|
||||
print(message.format(i,
|
||||
estim_b,
|
||||
int(self.batch_limit)))
|
||||
|
||||
if breaking:
|
||||
break
|
||||
|
||||
# Use collected neighbor histogram to get neighbors limit
|
||||
cumsum = np.cumsum(neighb_hists.T, axis=0)
|
||||
percentiles = np.sum(
|
||||
cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0
|
||||
)
|
||||
percentiles = np.sum(cumsum < (untouched_ratio * cumsum[hist_n - 1, :]), axis=0)
|
||||
self.dataset.neighborhood_limits = percentiles
|
||||
|
||||
if verbose:
|
||||
|
||||
# Crop histogram
|
||||
while np.sum(neighb_hists[:, -1]) == 0:
|
||||
neighb_hists = neighb_hists[:, :-1]
|
||||
hist_n = neighb_hists.shape[1]
|
||||
|
||||
print("\n**************************************************\n")
|
||||
line0 = "neighbors_num "
|
||||
print('\n**************************************************\n')
|
||||
line0 = 'neighbors_num '
|
||||
for layer in range(neighb_hists.shape[0]):
|
||||
line0 += "| layer {:2d} ".format(layer)
|
||||
line0 += '| layer {:2d} '.format(layer)
|
||||
print(line0)
|
||||
for neighb_size in range(hist_n):
|
||||
line0 = " {:4d} ".format(neighb_size)
|
||||
line0 = ' {:4d} '.format(neighb_size)
|
||||
for layer in range(neighb_hists.shape[0]):
|
||||
if neighb_size > percentiles[layer]:
|
||||
color = bcolors.FAIL
|
||||
else:
|
||||
color = bcolors.OKGREEN
|
||||
line0 += "|{:}{:10d}{:} ".format(
|
||||
color, neighb_hists[layer, neighb_size], bcolors.ENDC
|
||||
)
|
||||
line0 += '|{:}{:10d}{:} '.format(color,
|
||||
neighb_hists[layer, neighb_size],
|
||||
bcolors.ENDC)
|
||||
|
||||
print(line0)
|
||||
|
||||
print("\n**************************************************\n")
|
||||
print("\nchosen neighbors limits: ", percentiles)
|
||||
print('\n**************************************************\n')
|
||||
print('\nchosen neighbors limits: ', percentiles)
|
||||
print()
|
||||
|
||||
# Save batch_limit dictionary
|
||||
key = "{:.3f}_{:d}".format(
|
||||
self.dataset.config.first_subsampling_dl, self.dataset.config.batch_num
|
||||
)
|
||||
key = '{:.3f}_{:d}'.format(self.dataset.config.first_subsampling_dl,
|
||||
self.dataset.config.batch_num)
|
||||
batch_lim_dict[key] = self.batch_limit
|
||||
with open(batch_lim_file, "wb") as file:
|
||||
with open(batch_lim_file, 'wb') as file:
|
||||
pickle.dump(batch_lim_dict, file)
|
||||
|
||||
# Save neighb_limit dictionary
|
||||
for layer_ind in range(self.dataset.config.num_layers):
|
||||
dl = self.dataset.config.first_subsampling_dl * (2**layer_ind)
|
||||
dl = self.dataset.config.first_subsampling_dl * (2 ** layer_ind)
|
||||
if self.dataset.config.deform_layers[layer_ind]:
|
||||
r = dl * self.dataset.config.deform_radius
|
||||
else:
|
||||
r = dl * self.dataset.config.conv_radius
|
||||
key = "{:.3f}_{:.3f}".format(dl, r)
|
||||
key = '{:.3f}_{:.3f}'.format(dl, r)
|
||||
neighb_lim_dict[key] = self.dataset.neighborhood_limits[layer_ind]
|
||||
with open(neighb_lim_file, "wb") as file:
|
||||
with open(neighb_lim_file, 'wb') as file:
|
||||
pickle.dump(neighb_lim_dict, file)
|
||||
|
||||
print("Calibration done in {:.1f}s\n".format(time.time() - t0))
|
||||
|
||||
print('Calibration done in {:.1f}s\n'.format(time.time() - t0))
|
||||
return
|
||||
|
||||
|
||||
|
@ -714,6 +682,7 @@ class ModelNet40CustomBatch:
|
|||
"""Custom batch definition with memory pinning for ModelNet40"""
|
||||
|
||||
def __init__(self, input_list):
|
||||
|
||||
# Get rid of batch dimension
|
||||
input_list = input_list[0]
|
||||
|
||||
|
@ -722,21 +691,13 @@ class ModelNet40CustomBatch:
|
|||
|
||||
# Extract input tensors from the list of numpy array
|
||||
ind = 0
|
||||
self.points = [
|
||||
torch.from_numpy(nparray) for nparray in input_list[ind : ind + L]
|
||||
]
|
||||
self.points = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]]
|
||||
ind += L
|
||||
self.neighbors = [
|
||||
torch.from_numpy(nparray) for nparray in input_list[ind : ind + L]
|
||||
]
|
||||
self.neighbors = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]]
|
||||
ind += L
|
||||
self.pools = [
|
||||
torch.from_numpy(nparray) for nparray in input_list[ind : ind + L]
|
||||
]
|
||||
self.pools = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]]
|
||||
ind += L
|
||||
self.lengths = [
|
||||
torch.from_numpy(nparray) for nparray in input_list[ind : ind + L]
|
||||
]
|
||||
self.lengths = [torch.from_numpy(nparray) for nparray in input_list[ind:ind+L]]
|
||||
ind += L
|
||||
self.features = torch.from_numpy(input_list[ind])
|
||||
ind += 1
|
||||
|
@ -768,6 +729,7 @@ class ModelNet40CustomBatch:
|
|||
return self
|
||||
|
||||
def to(self, device):
|
||||
|
||||
self.points = [in_tensor.to(device) for in_tensor in self.points]
|
||||
self.neighbors = [in_tensor.to(device) for in_tensor in self.neighbors]
|
||||
self.pools = [in_tensor.to(device) for in_tensor in self.pools]
|
||||
|
@ -782,15 +744,15 @@ class ModelNet40CustomBatch:
|
|||
|
||||
def unstack_points(self, layer=None):
|
||||
"""Unstack the points"""
|
||||
return self.unstack_elements("points", layer)
|
||||
return self.unstack_elements('points', layer)
|
||||
|
||||
def unstack_neighbors(self, layer=None):
|
||||
"""Unstack the neighbors indices"""
|
||||
return self.unstack_elements("neighbors", layer)
|
||||
return self.unstack_elements('neighbors', layer)
|
||||
|
||||
def unstack_pools(self, layer=None):
|
||||
"""Unstack the pooling indices"""
|
||||
return self.unstack_elements("pools", layer)
|
||||
return self.unstack_elements('pools', layer)
|
||||
|
||||
def unstack_elements(self, element_name, layer=None, to_numpy=True):
|
||||
"""
|
||||
|
@ -798,31 +760,34 @@ class ModelNet40CustomBatch:
|
|||
layers
|
||||
"""
|
||||
|
||||
if element_name == "points":
|
||||
if element_name == 'points':
|
||||
elements = self.points
|
||||
elif element_name == "neighbors":
|
||||
elif element_name == 'neighbors':
|
||||
elements = self.neighbors
|
||||
elif element_name == "pools":
|
||||
elif element_name == 'pools':
|
||||
elements = self.pools[:-1]
|
||||
else:
|
||||
raise ValueError("Unknown element name: {:s}".format(element_name))
|
||||
raise ValueError('Unknown element name: {:s}'.format(element_name))
|
||||
|
||||
all_p_list = []
|
||||
for layer_i, layer_elems in enumerate(elements):
|
||||
|
||||
if layer is None or layer == layer_i:
|
||||
|
||||
i0 = 0
|
||||
p_list = []
|
||||
if element_name == "pools":
|
||||
lengths = self.lengths[layer_i + 1]
|
||||
if element_name == 'pools':
|
||||
lengths = self.lengths[layer_i+1]
|
||||
else:
|
||||
lengths = self.lengths[layer_i]
|
||||
|
||||
for b_i, length in enumerate(lengths):
|
||||
elem = layer_elems[i0 : i0 + length]
|
||||
if element_name == "neighbors":
|
||||
|
||||
elem = layer_elems[i0:i0 + length]
|
||||
if element_name == 'neighbors':
|
||||
elem[elem >= self.points[layer_i].shape[0]] = -1
|
||||
elem[elem >= 0] -= i0
|
||||
elif element_name == "pools":
|
||||
elif element_name == 'pools':
|
||||
elem[elem >= self.points[layer_i].shape[0]] = -1
|
||||
elem[elem >= 0] -= torch.sum(self.lengths[layer_i][:b_i])
|
||||
i0 += length
|
||||
|
@ -854,15 +819,16 @@ def debug_sampling(dataset, sampler, loader):
|
|||
"""Shows which labels are sampled according to strategy chosen"""
|
||||
label_sum = np.zeros((dataset.num_classes), dtype=np.int32)
|
||||
for epoch in range(10):
|
||||
|
||||
for batch_i, (points, normals, labels, indices, in_sizes) in enumerate(loader):
|
||||
# print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes)
|
||||
|
||||
label_sum += np.bincount(labels.numpy(), minlength=dataset.num_classes)
|
||||
print(label_sum)
|
||||
# print(sampler.potentials[:6])
|
||||
#print(sampler.potentials[:6])
|
||||
|
||||
print("******************")
|
||||
print("*******************************************")
|
||||
print('******************')
|
||||
print('*******************************************')
|
||||
|
||||
_, counts = np.unique(dataset.input_labels, return_counts=True)
|
||||
print(counts)
|
||||
|
@ -877,6 +843,7 @@ def debug_timing(dataset, sampler, loader):
|
|||
estim_b = dataset.config.batch_num
|
||||
|
||||
for epoch in range(10):
|
||||
|
||||
for batch_i, batch in enumerate(loader):
|
||||
# print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes)
|
||||
|
||||
|
@ -897,49 +864,56 @@ def debug_timing(dataset, sampler, loader):
|
|||
# Console display (only one per second)
|
||||
if (t[-1] - last_display) > -1.0:
|
||||
last_display = t[-1]
|
||||
message = "Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f}"
|
||||
print(
|
||||
message.format(
|
||||
batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1], estim_b
|
||||
)
|
||||
)
|
||||
message = 'Step {:08d} -> (ms/batch) {:8.2f} {:8.2f} / batch = {:.2f}'
|
||||
print(message.format(batch_i,
|
||||
1000 * mean_dt[0],
|
||||
1000 * mean_dt[1],
|
||||
estim_b))
|
||||
|
||||
print("************* Epoch ended *************")
|
||||
print('************* Epoch ended *************')
|
||||
|
||||
_, counts = np.unique(dataset.input_labels, return_counts=True)
|
||||
print(counts)
|
||||
|
||||
|
||||
def debug_show_clouds(dataset, sampler, loader):
|
||||
|
||||
|
||||
for epoch in range(10):
|
||||
|
||||
clouds = []
|
||||
cloud_normals = []
|
||||
cloud_labels = []
|
||||
|
||||
L = dataset.config.num_layers
|
||||
|
||||
for batch_i, batch in enumerate(loader):
|
||||
|
||||
# Print characteristics of input tensors
|
||||
print("\nPoints tensors")
|
||||
print('\nPoints tensors')
|
||||
for i in range(L):
|
||||
print(batch.points[i].dtype, batch.points[i].shape)
|
||||
print("\nNeigbors tensors")
|
||||
print('\nNeigbors tensors')
|
||||
for i in range(L):
|
||||
print(batch.neighbors[i].dtype, batch.neighbors[i].shape)
|
||||
print("\nPools tensors")
|
||||
print('\nPools tensors')
|
||||
for i in range(L):
|
||||
print(batch.pools[i].dtype, batch.pools[i].shape)
|
||||
print("\nStack lengths")
|
||||
print('\nStack lengths')
|
||||
for i in range(L):
|
||||
print(batch.lengths[i].dtype, batch.lengths[i].shape)
|
||||
print("\nFeatures")
|
||||
print('\nFeatures')
|
||||
print(batch.features.dtype, batch.features.shape)
|
||||
print("\nLabels")
|
||||
print('\nLabels')
|
||||
print(batch.labels.dtype, batch.labels.shape)
|
||||
print("\nAugment Scales")
|
||||
print('\nAugment Scales')
|
||||
print(batch.scales.dtype, batch.scales.shape)
|
||||
print("\nAugment Rotations")
|
||||
print('\nAugment Rotations')
|
||||
print(batch.rots.dtype, batch.rots.shape)
|
||||
print("\nModel indices")
|
||||
print('\nModel indices')
|
||||
print(batch.model_inds.dtype, batch.model_inds.shape)
|
||||
|
||||
print("\nAre input tensors pinned")
|
||||
print('\nAre input tensors pinned')
|
||||
print(batch.neighbors[0].is_pinned())
|
||||
print(batch.neighbors[-1].is_pinned())
|
||||
print(batch.points[0].is_pinned())
|
||||
|
@ -951,7 +925,7 @@ def debug_show_clouds(dataset, sampler, loader):
|
|||
|
||||
show_input_batch(batch)
|
||||
|
||||
print("*******************************************")
|
||||
print('*******************************************')
|
||||
|
||||
_, counts = np.unique(dataset.input_labels, return_counts=True)
|
||||
print(counts)
|
||||
|
@ -965,6 +939,7 @@ def debug_batch_and_neighbors_calib(dataset, sampler, loader):
|
|||
mean_dt = np.zeros(2)
|
||||
|
||||
for epoch in range(10):
|
||||
|
||||
for batch_i, input_list in enumerate(loader):
|
||||
# print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes)
|
||||
|
||||
|
@ -982,10 +957,12 @@ def debug_batch_and_neighbors_calib(dataset, sampler, loader):
|
|||
# Console display (only one per second)
|
||||
if (t[-1] - last_display) > 1.0:
|
||||
last_display = t[-1]
|
||||
message = "Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} "
|
||||
print(message.format(batch_i, 1000 * mean_dt[0], 1000 * mean_dt[1]))
|
||||
message = 'Step {:08d} -> Average timings (ms/batch) {:8.2f} {:8.2f} '
|
||||
print(message.format(batch_i,
|
||||
1000 * mean_dt[0],
|
||||
1000 * mean_dt[1]))
|
||||
|
||||
print("************* Epoch ended *************")
|
||||
print('************* Epoch ended *************')
|
||||
|
||||
_, counts = np.unique(dataset.input_labels, return_counts=True)
|
||||
print(counts)
|
||||
|
@ -999,6 +976,7 @@ class ModelNet40WorkerInitDebug:
|
|||
return
|
||||
|
||||
def __call__(self, worker_id):
|
||||
|
||||
# Print workers info
|
||||
worker_info = get_worker_info()
|
||||
print(worker_info)
|
||||
|
@ -1007,10 +985,11 @@ class ModelNet40WorkerInitDebug:
|
|||
dataset = worker_info.dataset # the dataset copy in this worker process
|
||||
|
||||
# In windows, each worker has its own copy of the dataset. In Linux, this is shared in memory
|
||||
print(dataset.input_labels.__array_interface__["data"])
|
||||
print(worker_info.dataset.input_labels.__array_interface__["data"])
|
||||
print(self.dataset.input_labels.__array_interface__["data"])
|
||||
print(dataset.input_labels.__array_interface__['data'])
|
||||
print(worker_info.dataset.input_labels.__array_interface__['data'])
|
||||
print(self.dataset.input_labels.__array_interface__['data'])
|
||||
|
||||
# configure the dataset to only process the split workload
|
||||
|
||||
return
|
||||
|
File diff suppressed because it is too large (3 files)
@@ -21,8 +21,12 @@
 #
 
 # Common libs
+import time
+import os
 import numpy as np
-from torch.utils.data import Dataset
+import sys
+import torch
+from torch.utils.data import DataLoader, Dataset
 from utils.config import Config
 from utils.mayavi_visu import *
 from kernels.kernel_points import create_3D_rotations
@@ -37,7 +41,6 @@ import cpp_wrappers.cpp_neighbors.radius_neighbors as cpp_neighbors
 # \***********************/
 #
 
 
 def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
     """
     CPP wrapper for a grid subsampling (method = barycenter for points and features)

@@ -50,35 +53,29 @@ def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0
     """
 
     if (features is None) and (labels is None):
-        return cpp_subsampling.subsample(points, sampleDl=sampleDl, verbose=verbose)
-    elif labels is None:
-        return cpp_subsampling.subsample(
-            points, features=features, sampleDl=sampleDl, verbose=verbose
-        )
-    elif features is None:
-        return cpp_subsampling.subsample(
-            points, classes=labels, sampleDl=sampleDl, verbose=verbose
-        )
+        return cpp_subsampling.subsample(points,
+                                         sampleDl=sampleDl,
+                                         verbose=verbose)
+    elif (labels is None):
+        return cpp_subsampling.subsample(points,
+                                         features=features,
+                                         sampleDl=sampleDl,
+                                         verbose=verbose)
+    elif (features is None):
+        return cpp_subsampling.subsample(points,
+                                         classes=labels,
+                                         sampleDl=sampleDl,
+                                         verbose=verbose)
     else:
-        return cpp_subsampling.subsample(
-            points,
-            features=features,
-            classes=labels,
-            sampleDl=sampleDl,
-            verbose=verbose,
-        )
+        return cpp_subsampling.subsample(points,
+                                         features=features,
+                                         classes=labels,
+                                         sampleDl=sampleDl,
+                                         verbose=verbose)
 
 
-def batch_grid_subsampling(
-    points,
-    batches_len,
-    features=None,
-    labels=None,
-    sampleDl=0.1,
-    max_p=0,
-    verbose=0,
-    random_grid_orient=True,
-):
+def batch_grid_subsampling(points, batches_len, features=None, labels=None,
+                           sampleDl=0.1, max_p=0, verbose=0, random_grid_orient=True):
     """
     CPP wrapper for a grid subsampling (method = barycenter for points and features)
     :param points: (N, 3) matrix of input points
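For orientation, the `grid_subsampling` wrapper in the hunk above is called like this elsewhere in this diff (the ModelNet40 loader); the toy array here is an assumption standing in for a real point cloud file:

```python
import numpy as np
from datasets.common import grid_subsampling

# A toy cloud: XYZ in the first three columns, normals in the next three,
# mirroring the ModelNet40 .txt layout used elsewhere in this diff.
data = np.random.rand(10000, 6).astype(np.float32)

# Barycenter subsampling on a 2 cm grid; with `features` given, the wrapper
# returns the subsampled points and the averaged features (here, normals).
points, normals = grid_subsampling(data[:, :3],
                                   features=data[:, 3:],
                                   sampleDl=0.02)
```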
@ -92,6 +89,7 @@ def batch_grid_subsampling(
|
|||
R = None
|
||||
B = len(batches_len)
|
||||
if random_grid_orient:
|
||||
|
||||
########################################################
|
||||
# Create a random rotation matrix for each batch element
|
||||
########################################################
|
||||
|
@ -101,9 +99,7 @@ def batch_grid_subsampling(
|
|||
phi = (np.random.rand(B) - 0.5) * np.pi
|
||||
|
||||
# Create the first vector in carthesian coordinates
|
||||
u = np.vstack(
|
||||
[np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]
|
||||
)
|
||||
u = np.vstack([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
|
||||
|
||||
# Choose a random rotation angle
|
||||
alpha = np.random.rand(B) * 2 * np.pi
|
||||
|
@ -119,9 +115,7 @@ def batch_grid_subsampling(
|
|||
points = points.copy()
|
||||
for bi, length in enumerate(batches_len):
|
||||
# Apply the rotation
|
||||
points[i0 : i0 + length, :] = np.sum(
|
||||
np.expand_dims(points[i0 : i0 + length, :], 2) * R[bi], axis=1
|
||||
)
|
||||
points[i0:i0 + length, :] = np.sum(np.expand_dims(points[i0:i0 + length, :], 2) * R[bi], axis=1)
|
||||
i0 += length
|
||||
|
||||
#######################
|
||||
|
@ -129,73 +123,61 @@ def batch_grid_subsampling(
|
|||
#######################
|
||||
|
||||
if (features is None) and (labels is None):
|
||||
s_points, s_len = cpp_subsampling.subsample_batch(
|
||||
points, batches_len, sampleDl=sampleDl, max_p=max_p, verbose=verbose
|
||||
)
|
||||
s_points, s_len = cpp_subsampling.subsample_batch(points,
|
||||
batches_len,
|
||||
sampleDl=sampleDl,
|
||||
max_p=max_p,
|
||||
verbose=verbose)
|
||||
if random_grid_orient:
|
||||
i0 = 0
|
||||
for bi, length in enumerate(s_len):
|
||||
s_points[i0 : i0 + length, :] = np.sum(
|
||||
np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1
|
||||
)
|
||||
s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1)
|
||||
i0 += length
|
||||
return s_points, s_len
|
||||
|
||||
elif labels is None:
|
||||
s_points, s_len, s_features = cpp_subsampling.subsample_batch(
|
||||
points,
|
||||
elif (labels is None):
|
||||
s_points, s_len, s_features = cpp_subsampling.subsample_batch(points,
|
||||
batches_len,
|
||||
features=features,
|
||||
sampleDl=sampleDl,
|
||||
max_p=max_p,
|
||||
verbose=verbose,
|
||||
)
|
||||
verbose=verbose)
|
||||
if random_grid_orient:
|
||||
i0 = 0
|
||||
for bi, length in enumerate(s_len):
|
||||
# Apply the rotation
|
||||
s_points[i0 : i0 + length, :] = np.sum(
|
||||
np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1
|
||||
)
|
||||
s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1)
|
||||
i0 += length
|
||||
return s_points, s_len, s_features
|
||||
|
||||
elif features is None:
|
||||
s_points, s_len, s_labels = cpp_subsampling.subsample_batch(
|
||||
points,
|
||||
elif (features is None):
|
||||
s_points, s_len, s_labels = cpp_subsampling.subsample_batch(points,
|
||||
batches_len,
|
||||
classes=labels,
|
||||
sampleDl=sampleDl,
|
||||
max_p=max_p,
|
||||
verbose=verbose,
|
||||
)
|
||||
verbose=verbose)
|
||||
if random_grid_orient:
|
||||
i0 = 0
|
||||
for bi, length in enumerate(s_len):
|
||||
# Apply the rotation
|
||||
s_points[i0 : i0 + length, :] = np.sum(
|
||||
np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1
|
||||
)
|
||||
s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1)
|
||||
i0 += length
|
||||
return s_points, s_len, s_labels
|
||||
|
||||
else:
|
||||
s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(
|
||||
points,
|
||||
s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(points,
|
||||
batches_len,
|
||||
features=features,
|
||||
classes=labels,
|
||||
sampleDl=sampleDl,
|
||||
max_p=max_p,
|
||||
verbose=verbose,
|
||||
)
|
||||
verbose=verbose)
|
||||
if random_grid_orient:
|
||||
i0 = 0
|
||||
for bi, length in enumerate(s_len):
|
||||
# Apply the rotation
|
||||
s_points[i0 : i0 + length, :] = np.sum(
|
||||
np.expand_dims(s_points[i0 : i0 + length, :], 2) * R[bi].T, axis=1
|
||||
)
|
||||
s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1)
|
||||
i0 += length
|
||||
return s_points, s_len, s_features, s_labels
|
||||
|
||||
|
@ -211,9 +193,7 @@ def batch_neighbors(queries, supports, q_batches, s_batches, radius):
|
|||
:return: neighbors indices
|
||||
"""
|
||||
|
||||
return cpp_neighbors.batch_query(
|
||||
queries, supports, q_batches, s_batches, radius=radius
|
||||
)
|
||||
return cpp_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=radius)
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------
|
||||
|
@ -231,7 +211,7 @@ class PointCloudDataset(Dataset):
|
|||
"""
|
||||
|
||||
self.name = name
|
||||
self.path = ""
|
||||
self.path = ''
|
||||
self.label_to_names = {}
|
||||
self.num_classes = 0
|
||||
self.label_values = np.zeros((0,), dtype=np.int32)
|
||||
|
@ -257,6 +237,7 @@ class PointCloudDataset(Dataset):
|
|||
return 0
|
||||
|
||||
def init_labels(self):
|
||||
|
||||
# Initialize all label parameters given the label_to_names dict
|
||||
self.num_classes = len(self.label_to_names)
|
||||
self.label_values = np.sort([k for k, v in self.label_to_names.items()])
|
||||
|
@ -275,33 +256,27 @@ class PointCloudDataset(Dataset):
|
|||
R = np.eye(points.shape[1])
|
||||
|
||||
if points.shape[1] == 3:
|
||||
if self.config.augment_rotation == "vertical":
|
||||
if self.config.augment_rotation == 'vertical':
|
||||
|
||||
# Create random rotations
|
||||
theta = np.random.rand() * 2 * np.pi
|
||||
c, s = np.cos(theta), np.sin(theta)
|
||||
R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
|
||||
|
||||
elif self.config.augment_rotation == "all":
|
||||
elif self.config.augment_rotation == 'all':
|
||||
|
||||
# Choose two random angles for the first vector in polar coordinates
|
||||
theta = np.random.rand() * 2 * np.pi
|
||||
phi = (np.random.rand() - 0.5) * np.pi
|
||||
|
||||
# Create the first vector in carthesian coordinates
|
||||
u = np.array(
|
||||
[
|
||||
np.cos(theta) * np.cos(phi),
|
||||
np.sin(theta) * np.cos(phi),
|
||||
np.sin(phi),
|
||||
]
|
||||
)
|
||||
u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
|
||||
|
||||
# Choose a random rotation angle
|
||||
alpha = np.random.rand() * 2 * np.pi
|
||||
|
||||
# Create the rotation matrix with this vector and angle
|
||||
R = create_3D_rotations(
|
||||
np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1))
|
||||
)[0]
|
||||
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0]
|
||||
|
||||
R = R.astype(np.float32)
|
||||
|
||||
|
@ -326,19 +301,17 @@ class PointCloudDataset(Dataset):
|
|||
# Noise
|
||||
#######
|
||||
|
||||
noise = (
|
||||
np.random.randn(points.shape[0], points.shape[1])
|
||||
* self.config.augment_noise
|
||||
).astype(np.float32)
|
||||
noise = (np.random.randn(points.shape[0], points.shape[1]) * self.config.augment_noise).astype(np.float32)
|
||||
|
||||
##################
|
||||
# Apply transforms
|
||||
##################
|
||||
|
||||
# Do not use np.dot because it is multi-threaded
|
||||
# augmented_points = np.dot(points, R) * scale + noise
|
||||
#augmented_points = np.dot(points, R) * scale + noise
|
||||
augmented_points = np.sum(np.expand_dims(points, 2) * R, axis=1) * scale + noise
|
||||
|
||||
|
||||
if normals is None:
|
||||
return augmented_points, scale, R
|
||||
else:
|
||||
|
@ -346,14 +319,12 @@ class PointCloudDataset(Dataset):
|
|||
normal_scale = scale[[1, 2, 0]] * scale[[2, 0, 1]]
|
||||
augmented_normals = np.dot(normals, R) * normal_scale
|
||||
# Renormalise
|
||||
augmented_normals *= 1 / (
|
||||
np.linalg.norm(augmented_normals, axis=1, keepdims=True) + 1e-6
|
||||
)
|
||||
augmented_normals *= 1 / (np.linalg.norm(augmented_normals, axis=1, keepdims=True) + 1e-6)
|
||||
|
||||
if verbose:
|
||||
test_p = [np.vstack([points, augmented_points])]
|
||||
test_n = [np.vstack([normals, augmented_normals])]
|
||||
test_l = [np.hstack([points[:, 2] * 0, augmented_points[:, 2] * 0 + 1])]
|
||||
test_l = [np.hstack([points[:, 2]*0, augmented_points[:, 2]*0+1])]
|
||||
show_ModelNet_examples(test_p, test_n, test_l)
|
||||
|
||||
return augmented_points, augmented_normals, scale, R
|
||||
|
@ -366,13 +337,16 @@ class PointCloudDataset(Dataset):
|
|||
|
||||
# crop neighbors matrix
|
||||
if len(self.neighborhood_limits) > 0:
|
||||
return neighbors[:, : self.neighborhood_limits[layer]]
|
||||
return neighbors[:, :self.neighborhood_limits[layer]]
|
||||
else:
|
||||
return neighbors
|
||||
|
||||
def classification_inputs(
|
||||
self, stacked_points, stacked_features, labels, stack_lengths
|
||||
):
|
||||
def classification_inputs(self,
|
||||
stacked_points,
|
||||
stacked_features,
|
||||
labels,
|
||||
stack_lengths):
|
||||
|
||||
# Starting radius of convolutions
|
||||
r_normal = self.config.first_subsampling_dl * self.config.conv_radius
|
||||
|
||||
|
@ -393,13 +367,9 @@ class PointCloudDataset(Dataset):
|
|||
arch = self.config.architecture
|
||||
|
||||
for block_i, block in enumerate(arch):
|
||||
|
||||
# Get all blocks of the layer
|
||||
if not (
|
||||
"pool" in block
|
||||
or "strided" in block
|
||||
or "global" in block
|
||||
or "upsample" in block
|
||||
):
|
||||
if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block):
|
||||
layer_blocks += [block]
|
||||
continue
|
||||
|
||||
|
@ -409,14 +379,12 @@ class PointCloudDataset(Dataset):
|
|||
deform_layer = False
|
||||
if layer_blocks:
|
||||
# Convolutions are done in this layer, compute the neighbors with the good radius
|
||||
if np.any(["deformable" in blck for blck in layer_blocks]):
|
||||
if np.any(['deformable' in blck for blck in layer_blocks]):
|
||||
r = r_normal * self.config.deform_radius / self.config.conv_radius
|
||||
deform_layer = True
|
||||
else:
|
||||
r = r_normal
|
||||
conv_i = batch_neighbors(
|
||||
stacked_points, stacked_points, stack_lengths, stack_lengths, r
|
||||
)
|
||||
conv_i = batch_neighbors(stacked_points, stacked_points, stack_lengths, stack_lengths, r)
|
||||
|
||||
else:
|
||||
# This layer only perform pooling, no neighbors required
|
||||
|
@ -426,26 +394,23 @@ class PointCloudDataset(Dataset):
|
|||
# *************************
|
||||
|
||||
# If end of layer is a pooling operation
|
||||
if "pool" in block or "strided" in block:
|
||||
if 'pool' in block or 'strided' in block:
|
||||
|
||||
# New subsampling length
|
||||
dl = 2 * r_normal / self.config.conv_radius
|
||||
|
||||
# Subsampled points
|
||||
pool_p, pool_b = batch_grid_subsampling(
|
||||
stacked_points, stack_lengths, sampleDl=dl
|
||||
)
|
||||
pool_p, pool_b = batch_grid_subsampling(stacked_points, stack_lengths, sampleDl=dl)
|
||||
|
||||
# Radius of pooled neighbors
|
||||
if "deformable" in block:
|
||||
if 'deformable' in block:
|
||||
r = r_normal * self.config.deform_radius / self.config.conv_radius
|
||||
deform_layer = True
|
||||
else:
|
||||
r = r_normal
|
||||
|
||||
# Subsample indices
|
||||
pool_i = batch_neighbors(
|
||||
pool_p, stacked_points, pool_b, stack_lengths, r
|
||||
)
|
||||
pool_i = batch_neighbors(pool_p, stacked_points, pool_b, stack_lengths, r)
|
||||
|
||||
else:
|
||||
# No pooling in the end of this layer, no pooling indices required
|
||||
|
@ -473,7 +438,7 @@ class PointCloudDataset(Dataset):
|
|||
layer_blocks = []
|
||||
|
||||
# Stop when meeting a global pooling or upsampling
|
||||
if "global" in block or "upsample" in block:
|
||||
if 'global' in block or 'upsample' in block:
|
||||
break
|
||||
|
||||
###############
|
||||
|
@ -488,9 +453,13 @@ class PointCloudDataset(Dataset):
|
|||
|
||||
return li
|
||||
|
||||
def segmentation_inputs(
|
||||
self, stacked_points, stacked_features, labels, stack_lengths
|
||||
):
|
||||
|
||||
def segmentation_inputs(self,
|
||||
stacked_points,
|
||||
stacked_features,
|
||||
labels,
|
||||
stack_lengths):
|
||||
|
||||
# Starting radius of convolutions
|
||||
r_normal = self.config.first_subsampling_dl * self.config.conv_radius
|
||||
|
||||
|
@ -512,13 +481,9 @@ class PointCloudDataset(Dataset):
|
|||
arch = self.config.architecture
|
||||
|
||||
for block_i, block in enumerate(arch):
|
||||
|
||||
# Get all blocks of the layer
|
||||
if not (
|
||||
"pool" in block
|
||||
or "strided" in block
|
||||
or "global" in block
|
||||
or "upsample" in block
|
||||
):
|
||||
if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block):
|
||||
layer_blocks += [block]
|
||||
continue
|
||||
|
||||
|
@ -528,14 +493,12 @@ class PointCloudDataset(Dataset):
|
|||
deform_layer = False
|
||||
if layer_blocks:
|
||||
# Convolutions are done in this layer, compute the neighbors with the good radius
|
||||
if np.any(["deformable" in blck for blck in layer_blocks]):
|
||||
if np.any(['deformable' in blck for blck in layer_blocks]):
|
||||
r = r_normal * self.config.deform_radius / self.config.conv_radius
|
||||
deform_layer = True
|
||||
else:
|
||||
r = r_normal
|
||||
conv_i = batch_neighbors(
|
||||
stacked_points, stacked_points, stack_lengths, stack_lengths, r
|
||||
)
|
||||
conv_i = batch_neighbors(stacked_points, stacked_points, stack_lengths, stack_lengths, r)
|
||||
|
||||
else:
|
||||
# This layer only perform pooling, no neighbors required
|
||||
|
@ -545,31 +508,26 @@ class PointCloudDataset(Dataset):
|
|||
# *************************
|
||||
|
||||
# If end of layer is a pooling operation
|
||||
if "pool" in block or "strided" in block:
|
||||
if 'pool' in block or 'strided' in block:
|
||||
|
||||
# New subsampling length
|
||||
dl = 2 * r_normal / self.config.conv_radius
|
||||
|
||||
# Subsampled points
|
||||
pool_p, pool_b = batch_grid_subsampling(
|
||||
stacked_points, stack_lengths, sampleDl=dl
|
||||
)
|
||||
pool_p, pool_b = batch_grid_subsampling(stacked_points, stack_lengths, sampleDl=dl)
|
||||
|
||||
# Radius of pooled neighbors
|
||||
if "deformable" in block:
|
||||
if 'deformable' in block:
|
||||
r = r_normal * self.config.deform_radius / self.config.conv_radius
|
||||
deform_layer = True
|
||||
else:
|
||||
r = r_normal
|
||||
|
||||
# Subsample indices
|
||||
pool_i = batch_neighbors(
|
||||
pool_p, stacked_points, pool_b, stack_lengths, r
|
||||
)
|
||||
pool_i = batch_neighbors(pool_p, stacked_points, pool_b, stack_lengths, r)
|
||||
|
||||
# Upsample indices (with the radius of the next layer to keep wanted density)
|
||||
up_i = batch_neighbors(
|
||||
stacked_points, pool_p, stack_lengths, pool_b, 2 * r
|
||||
)
|
||||
up_i = batch_neighbors(stacked_points, pool_p, stack_lengths, pool_b, 2 * r)
|
||||
|
||||
else:
|
||||
# No pooling in the end of this layer, no pooling indices required
|
||||
|
@ -582,7 +540,7 @@ class PointCloudDataset(Dataset):
|
|||
conv_i = self.big_neighborhood_filter(conv_i, len(input_points))
|
||||
pool_i = self.big_neighborhood_filter(pool_i, len(input_points))
|
||||
if up_i.shape[0] > 0:
|
||||
up_i = self.big_neighborhood_filter(up_i, len(input_points) + 1)
|
||||
up_i = self.big_neighborhood_filter(up_i, len(input_points)+1)
|
||||
|
||||
# Updating input lists
|
||||
input_points += [stacked_points]
|
||||
|
@ -601,7 +559,7 @@ class PointCloudDataset(Dataset):
|
|||
layer_blocks = []
|
||||
|
||||
# Stop when meeting a global pooling or upsampling
|
||||
if "global" in block or "upsample" in block:
|
||||
if 'global' in block or 'upsample' in block:
|
||||
break
|
||||
|
||||
###############
|
||||
|
@ -609,13 +567,20 @@ class PointCloudDataset(Dataset):
|
|||
###############
|
||||
|
||||
# list of network inputs
|
||||
li = (
|
||||
input_points
|
||||
+ input_neighbors
|
||||
+ input_pools
|
||||
+ input_upsamples
|
||||
+ input_stack_lengths
|
||||
)
|
||||
li = input_points + input_neighbors + input_pools + input_upsamples + input_stack_lengths
|
||||
li += [stacked_features, labels]
|
||||
|
||||
return li
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@@ -4,11 +4,11 @@
 ### Data
 
 We consider our experiment folder is located at `XXXX/Experiments/KPConv-PyTorch`. And we use a common Data folder
-loacated at `XXXX/Data`. Therefore the relative path to the Data folder is `./Data`.
+loacated at `XXXX/Data`. Therefore the relative path to the Data folder is `../../Data`.
 
 Regularly sampled clouds from ModelNet40 dataset can be downloaded
 <a href="https://shapenet.cs.stanford.edu/media/modelnet40_normal_resampled.zip">here (1.6 GB)</a>.
-Uncompress the data and move it inside the folder `./Data/ModelNet40`.
+Uncompress the data and move it inside the folder `../../Data/ModelNet40`.
 
 N.B. If you want to place your data anywhere else, you just have to change the variable
 `self.path` of `ModelNet40Dataset` class ([here](https://github.com/HuguesTHOMAS/KPConv-PyTorch/blob/e9d328135c0a3818ee0cf1bb5bb63434ce15c22e/datasets/ModelNet40.py#L113)).
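As a concrete illustration of the note above, the attribute to edit in `ModelNet40Dataset.__init__` is the same one this compare touches; the alternative path shown is only a hypothetical example:

```python
# datasets/ModelNet40.py, inside ModelNet40Dataset.__init__

# Dataset folder
self.path = '../../Data/ModelNet40'            # value on the d0ac818366 side of this compare
# self.path = '/my/storage/Data/ModelNet40'    # hypothetical custom absolute location
```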
@ -4,10 +4,10 @@
|
|||
### Data
|
||||
|
||||
We consider our experiment folder is located at `XXXX/Experiments/KPConv-PyTorch`. And we use a common Data folder
|
||||
loacated at `XXXX/Data`. Therefore the relative path to the Data folder is `./Data`.
|
||||
loacated at `XXXX/Data`. Therefore the relative path to the Data folder is `../../Data`.
|
||||
|
||||
S3DIS dataset can be downloaded <a href="https://goo.gl/forms/4SoGp4KtH1jfRqEj2">here (4.8 GB)</a>.
|
||||
Download the file named `Stanford3dDataset_v1.2.zip`, uncompress the data and move it to `./Data/S3DIS`.
|
||||
Download the file named `Stanford3dDataset_v1.2.zip`, uncompress the data and move it to `../../Data/S3DIS`.
|
||||
|
||||
N.B. If you want to place your data anywhere else, you just have to change the variable
|
||||
`self.path` of `S3DISDataset` class ([here](https://github.com/HuguesTHOMAS/KPConv-PyTorch/blob/afa18c92f00c6ed771b61cb08b285d2f93446ea4/datasets/S3DIS.py#L88)).
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
### Data
|
||||
|
||||
We assume our experiment folder is located at `XXXX/Experiments/KPConv-PyTorch`, and we use a common Data folder
|
||||
located at `XXXX/Data`. Therefore the relative path to the Data folder is `./Data`.
|
||||
located at `XXXX/Data`. Therefore the relative path to the Data folder is `../../Data`.
|
||||
|
||||
SemanticKitti dataset can be downloaded <a href="http://semantic-kitti.org/dataset.html#download">here (80 GB)</a>.
|
||||
Download the three files named:
|
||||
|
@ -12,13 +12,13 @@ Download the three file named:
|
|||
* [`data_odometry_calib.zip` (1 MB)](http://www.cvlibs.net/download.php?file=data_odometry_calib.zip)
|
||||
* [`data_odometry_labels.zip` (179 MB)](http://semantic-kitti.org/assets/data_odometry_labels.zip)
|
||||
|
||||
uncompress the data and move it to `./Data/SemanticKitti`.
|
||||
uncompress the data and move it to `../../Data/SemanticKitti`.
|
||||
|
||||
You also need to download the files
|
||||
[`semantic-kitti-all.yaml`](https://github.com/PRBonn/semantic-kitti-api/blob/master/config/semantic-kitti-all.yaml)
|
||||
and
|
||||
[`semantic-kitti.yaml`](https://github.com/PRBonn/semantic-kitti-api/blob/master/config/semantic-kitti.yaml).
|
||||
Place them in your `./Data/SemanticKitti` folder.
|
||||
Place them in your `../../Data/SemanticKitti` folder.
|
||||
|
||||
N.B. If you want to place your data anywhere else, you just have to change the variable
|
||||
`self.path` of `SemanticKittiDataset` class ([here](https://github.com/HuguesTHOMAS/KPConv-PyTorch/blob/c32e6ce94ed34a3dd9584f98d8dc0be02535dfb4/datasets/SemanticKitti.py#L65)).
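A similar layout check for SemanticKitti, assuming the `../../Data` layout above; the `sequences` folder name is an assumption about how the odometry archives unpack:

from os.path import exists, join

kitti_path = '../../Data/SemanticKitti'
for item in ['sequences', 'semantic-kitti-all.yaml', 'semantic-kitti.yaml']:
    print('{:30s} {:s}'.format(item, 'ok' if exists(join(kitti_path, item)) else 'missing'))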
|
||||
|
|
|
@ -23,8 +23,10 @@
|
|||
|
||||
|
||||
# Import numpy package and name it "np"
|
||||
import time
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib import cm
|
||||
from os import makedirs
|
||||
from os.path import join, exists
|
||||
|
||||
|
@ -39,7 +41,6 @@ from utils.config import bcolors
|
|||
#
|
||||
#
|
||||
|
||||
|
||||
def create_3D_rotations(axis, angle):
|
||||
"""
|
||||
Create rotation matrices from a list of axes and angles. Code from Wikipedia on quaternions.
|
||||
|
@ -61,9 +62,7 @@ def create_3D_rotations(axis, angle):
|
|||
t19 = t2 * axis[:, 1] * axis[:, 2]
|
||||
t20 = t8 * axis[:, 0]
|
||||
t24 = axis[:, 2] * axis[:, 2]
|
||||
R = np.stack(
|
||||
[
|
||||
t1 + t2 * t3,
|
||||
R = np.stack([t1 + t2 * t3,
|
||||
t7 - t9,
|
||||
t11 + t12,
|
||||
t7 + t9,
|
||||
|
@ -71,25 +70,13 @@ def create_3D_rotations(axis, angle):
|
|||
t19 - t20,
|
||||
t11 - t12,
|
||||
t19 + t20,
|
||||
t1 + t2 * t24,
|
||||
],
|
||||
axis=1,
|
||||
)
|
||||
t1 + t2 * t24], axis=1)
|
||||
|
||||
return np.reshape(R, (-1, 3, 3))
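A minimal usage sketch for the function above: one axis and one angle, followed by a check that the output is a proper rotation matrix.

axis = np.array([[0.0, 0.0, 1.0]])             # rotate around the z axis
angle = np.array([np.pi / 3])                   # by 60 degrees
R = create_3D_rotations(axis, angle)            # shape (1, 3, 3)
print(np.allclose(R[0].dot(R[0].T), np.eye(3)))    # True: orthonormal
print(np.allclose(np.linalg.det(R[0]), 1.0))       # True: determinant +1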
|
||||
|
||||
|
||||
def spherical_Lloyd(
|
||||
radius,
|
||||
num_cells,
|
||||
dimension=3,
|
||||
fixed="center",
|
||||
approximation="monte-carlo",
|
||||
approx_n=5000,
|
||||
max_iter=500,
|
||||
momentum=0.9,
|
||||
verbose=0,
|
||||
):
|
||||
def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximation='monte-carlo',
|
||||
approx_n=5000, max_iter=500, momentum=0.9, verbose=0):
|
||||
"""
|
||||
Creation of kernel points via the Lloyd algorithm. We use an approximation of the algorithm and compute the Voronoi
|
||||
cell centers with a discretization of space. The exact formula is not trivial when parts of the sphere form the cell sides.
|
||||
|
@ -122,15 +109,13 @@ def spherical_Lloyd(
|
|||
new_points = np.random.rand(num_cells, dimension) * 2 * radius0 - radius0
|
||||
kernel_points = np.vstack((kernel_points, new_points))
|
||||
d2 = np.sum(np.power(kernel_points, 2), axis=1)
|
||||
kernel_points = kernel_points[
|
||||
np.logical_and(d2 < radius0**2, (0.9 * radius0) ** 2 < d2), :
|
||||
]
|
||||
kernel_points = kernel_points[np.logical_and(d2 < radius0 ** 2, (0.9 * radius0) ** 2 < d2), :]
|
||||
kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1))
|
||||
|
||||
# Optional fixing
|
||||
if fixed == "center":
|
||||
if fixed == 'center':
|
||||
kernel_points[0, :] *= 0
|
||||
if fixed == "verticals":
|
||||
if fixed == 'verticals':
|
||||
kernel_points[:3, :] *= 0
|
||||
kernel_points[1, -1] += 2 * radius0 / 3
|
||||
kernel_points[2, -1] -= 2 * radius0 / 3
|
||||
|
@ -144,10 +129,10 @@ def spherical_Lloyd(
|
|||
fig = plt.figure()
|
||||
|
||||
# Initialize discretization if this method is chosen
|
||||
if approximation == "discretization":
|
||||
side_n = int(np.floor(approx_n ** (1.0 / dimension)))
|
||||
if approximation == 'discretization':
|
||||
side_n = int(np.floor(approx_n ** (1. / dimension)))
|
||||
dl = 2 * radius0 / side_n
|
||||
coords = np.arange(-radius0 + dl / 2, radius0, dl)
|
||||
coords = np.arange(-radius0 + dl/2, radius0, dl)
|
||||
if dimension == 2:
|
||||
x, y = np.meshgrid(coords, coords)
|
||||
X = np.vstack((np.ravel(x), np.ravel(y))).T
|
||||
|
@ -158,13 +143,11 @@ def spherical_Lloyd(
|
|||
x, y, z, t = np.meshgrid(coords, coords, coords, coords)
|
||||
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T
|
||||
else:
|
||||
raise ValueError("Unsupported dimension (max is 4)")
|
||||
elif approximation == "monte-carlo":
|
||||
raise ValueError('Unsupported dimension (max is 4)')
|
||||
elif approximation == 'monte-carlo':
|
||||
X = np.zeros((0, dimension))
|
||||
else:
|
||||
raise ValueError(
|
||||
'Wrong approximation method chosen: "{:s}"'.format(approximation)
|
||||
)
|
||||
raise ValueError('Wrong approximation method chosen: "{:s}"'.format(approximation))
|
||||
|
||||
# Only points inside the sphere are used
|
||||
d2 = np.sum(np.power(X, 2), axis=1)
|
||||
|
@ -181,8 +164,9 @@ def spherical_Lloyd(
|
|||
max_moves = np.zeros((0,))
|
||||
|
||||
for iter in range(max_iter):
|
||||
|
||||
# In the case of monte-carlo, renew the sampled points
|
||||
if approximation == "monte-carlo":
|
||||
if approximation == 'monte-carlo':
|
||||
X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0
|
||||
d2 = np.sum(np.power(X, 2), axis=1)
|
||||
X = X[d2 < radius0 * radius0, :]
|
||||
|
@ -195,7 +179,7 @@ def spherical_Lloyd(
|
|||
cell_inds = np.argmin(sq_distances, axis=1)
|
||||
centers = []
|
||||
for c in range(num_cells):
|
||||
bool_c = cell_inds == c
|
||||
bool_c = (cell_inds == c)
|
||||
num_c = np.sum(bool_c.astype(np.int32))
|
||||
if num_c > 0:
|
||||
centers.append(np.sum(X[bool_c, :], axis=0) / num_c)
|
||||
|
@ -212,42 +196,28 @@ def spherical_Lloyd(
|
|||
max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1)))
|
||||
|
||||
# Optional fixing
|
||||
if fixed == "center":
|
||||
if fixed == 'center':
|
||||
kernel_points[0, :] *= 0
|
||||
if fixed == "verticals":
|
||||
if fixed == 'verticals':
|
||||
kernel_points[0, :] *= 0
|
||||
kernel_points[:3, :-1] *= 0
|
||||
|
||||
if verbose:
|
||||
print(
|
||||
"iter {:5d} / max move = {:f}".format(
|
||||
iter, np.max(np.linalg.norm(moves, axis=1))
|
||||
)
|
||||
)
|
||||
print('iter {:5d} / max move = {:f}'.format(iter, np.max(np.linalg.norm(moves, axis=1))))
|
||||
if warning:
|
||||
print(
|
||||
"{:}WARNING: at least one point has no cell{:}".format(
|
||||
bcolors.WARNING, bcolors.ENDC
|
||||
)
|
||||
)
|
||||
print('{:}WARNING: at least one point has no cell{:}'.format(bcolors.WARNING, bcolors.ENDC))
|
||||
if verbose > 1:
|
||||
plt.clf()
|
||||
plt.scatter(
|
||||
X[:, 0],
|
||||
X[:, 1],
|
||||
c=cell_inds,
|
||||
s=20.0,
|
||||
marker=".",
|
||||
cmap=plt.get_cmap("tab20"),
|
||||
)
|
||||
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
|
||||
plt.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0,
|
||||
marker='.', cmap=plt.get_cmap('tab20'))
|
||||
#plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
|
||||
# marker='+', cmap=plt.get_cmap('tab20'))
|
||||
plt.plot(kernel_points[:, 0], kernel_points[:, 1], "k+")
|
||||
circle = plt.Circle((0, 0), radius0, color="r", fill=False)
|
||||
plt.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
|
||||
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
|
||||
fig.axes[0].add_artist(circle)
|
||||
fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1))
|
||||
fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1))
|
||||
fig.axes[0].set_aspect("equal")
|
||||
fig.axes[0].set_aspect('equal')
|
||||
plt.draw()
|
||||
plt.pause(0.001)
|
||||
plt.show(block=False)
|
||||
|
@ -261,45 +231,32 @@ def spherical_Lloyd(
|
|||
if dimension == 2:
|
||||
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8])
|
||||
ax1.plot(max_moves)
|
||||
ax2.scatter(
|
||||
X[:, 0],
|
||||
X[:, 1],
|
||||
c=cell_inds,
|
||||
s=20.0,
|
||||
marker=".",
|
||||
cmap=plt.get_cmap("tab20"),
|
||||
)
|
||||
ax2.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0,
|
||||
marker='.', cmap=plt.get_cmap('tab20'))
|
||||
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
|
||||
# marker='+', cmap=plt.get_cmap('tab20'))
|
||||
ax2.plot(kernel_points[:, 0], kernel_points[:, 1], "k+")
|
||||
circle = plt.Circle((0, 0), radius0, color="r", fill=False)
|
||||
ax2.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
|
||||
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
|
||||
ax2.add_artist(circle)
|
||||
ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1))
|
||||
ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1))
|
||||
ax2.set_aspect("equal")
|
||||
plt.title("Check if kernel is correct.")
|
||||
ax2.set_aspect('equal')
|
||||
plt.title('Check if kernel is correct.')
|
||||
plt.draw()
|
||||
plt.show()
|
||||
|
||||
if dimension > 2:
|
||||
plt.figure()
|
||||
plt.plot(max_moves)
|
||||
plt.title("Check if kernel is correct.")
|
||||
plt.title('Check if kernel is correct.')
|
||||
plt.show()
|
||||
|
||||
# Rescale kernels with real radius
|
||||
return kernel_points * radius
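Usage sketch for the generator above: 15 kernel points inside a unit sphere with the central point fixed (verbose=0 keeps the matplotlib figures off).

kp = spherical_Lloyd(1.0, 15, dimension=3, fixed='center', verbose=0)
print(kp.shape)                   # (15, 3)
print(np.allclose(kp[0], 0.0))    # True: first point pinned at the center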
|
||||
|
||||
|
||||
def kernel_point_optimization_debug(
|
||||
radius,
|
||||
num_points,
|
||||
num_kernels=1,
|
||||
dimension=3,
|
||||
fixed="center",
|
||||
ratio=0.66,
|
||||
verbose=0,
|
||||
):
|
||||
def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension=3,
|
||||
fixed='center', ratio=0.66, verbose=0):
|
||||
"""
|
||||
Creation of kernel points via optimization of potentials.
|
||||
:param radius: Radius of the kernels
|
||||
|
@ -335,25 +292,18 @@ def kernel_point_optimization_debug(
|
|||
#######################
|
||||
|
||||
# Random kernel points
|
||||
kernel_points = (
|
||||
np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
|
||||
)
|
||||
while kernel_points.shape[0] < num_kernels * num_points:
|
||||
new_points = (
|
||||
np.random.rand(num_kernels * num_points - 1, dimension) * diameter0
|
||||
- radius0
|
||||
)
|
||||
kernel_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
|
||||
while (kernel_points.shape[0] < num_kernels * num_points):
|
||||
new_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
|
||||
kernel_points = np.vstack((kernel_points, new_points))
|
||||
d2 = np.sum(np.power(kernel_points, 2), axis=1)
|
||||
kernel_points = kernel_points[d2 < 0.5 * radius0 * radius0, :]
|
||||
kernel_points = kernel_points[: num_kernels * num_points, :].reshape(
|
||||
(num_kernels, num_points, -1)
|
||||
)
|
||||
kernel_points = kernel_points[:num_kernels * num_points, :].reshape((num_kernels, num_points, -1))
|
||||
|
||||
# Optional fixing
|
||||
if fixed == "center":
|
||||
if fixed == 'center':
|
||||
kernel_points[:, 0, :] *= 0
|
||||
if fixed == "verticals":
|
||||
if fixed == 'verticals':
|
||||
kernel_points[:, :3, :] *= 0
|
||||
kernel_points[:, 1, -1] += 2 * radius0 / 3
|
||||
kernel_points[:, 2, -1] -= 2 * radius0 / 3
|
||||
|
@ -363,13 +313,14 @@ def kernel_point_optimization_debug(
|
|||
#####################
|
||||
|
||||
# Initialize figure
|
||||
if verbose > 1:
|
||||
if verbose>1:
|
||||
fig = plt.figure()
|
||||
|
||||
saved_gradient_norms = np.zeros((10000, num_kernels))
|
||||
old_gradient_norms = np.zeros((num_kernels, num_points))
|
||||
step = -1
|
||||
while step < 10000:
|
||||
|
||||
# Increment
|
||||
step += 1
|
||||
|
||||
|
@ -380,16 +331,16 @@ def kernel_point_optimization_debug(
|
|||
A = np.expand_dims(kernel_points, axis=2)
|
||||
B = np.expand_dims(kernel_points, axis=1)
|
||||
interd2 = np.sum(np.power(A - B, 2), axis=-1)
|
||||
inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3 / 2) + 1e-6)
|
||||
inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3/2) + 1e-6)
|
||||
inter_grads = np.sum(inter_grads, axis=1)
|
||||
|
||||
# Derivative of the radius potential
|
||||
circle_grads = 10 * kernel_points
|
||||
circle_grads = 10*kernel_points
|
||||
|
||||
# All gradients
|
||||
gradients = inter_grads + circle_grads
|
||||
|
||||
if fixed == "verticals":
|
||||
if fixed == 'verticals':
|
||||
gradients[:, 1:3, :-1] = 0
|
||||
|
||||
# Stop condition
|
||||
|
@ -401,17 +352,9 @@ def kernel_point_optimization_debug(
|
|||
|
||||
# Stop if the gradients of all moving points have stabilized (low gradient diff)
|
||||
|
||||
if (
|
||||
fixed == "center"
|
||||
and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:]))
|
||||
< thresh
|
||||
):
|
||||
if fixed == 'center' and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:])) < thresh:
|
||||
break
|
||||
elif (
|
||||
fixed == "verticals"
|
||||
and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:]))
|
||||
< thresh
|
||||
):
|
||||
elif fixed == 'verticals' and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:])) < thresh:
|
||||
break
|
||||
elif np.max(np.abs(old_gradient_norms - gradients_norms)) < thresh:
|
||||
break
|
||||
|
@ -424,32 +367,24 @@ def kernel_point_optimization_debug(
|
|||
moving_dists = np.minimum(moving_factor * gradients_norms, clip)
|
||||
|
||||
# Fix central point
|
||||
if fixed == "center":
|
||||
if fixed == 'center':
|
||||
moving_dists[:, 0] = 0
|
||||
if fixed == "verticals":
|
||||
if fixed == 'verticals':
|
||||
moving_dists[:, 0] = 0
|
||||
|
||||
# Move points
|
||||
kernel_points -= (
|
||||
np.expand_dims(moving_dists, -1)
|
||||
* gradients
|
||||
/ np.expand_dims(gradients_norms + 1e-6, -1)
|
||||
)
|
||||
kernel_points -= np.expand_dims(moving_dists, -1) * gradients / np.expand_dims(gradients_norms + 1e-6, -1)
|
||||
|
||||
if verbose:
|
||||
print(
|
||||
"step {:5d} / max grad = {:f}".format(
|
||||
step, np.max(gradients_norms[:, 3:])
|
||||
)
|
||||
)
|
||||
print('step {:5d} / max grad = {:f}'.format(step, np.max(gradients_norms[:, 3:])))
|
||||
if verbose > 1:
|
||||
plt.clf()
|
||||
plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], ".")
|
||||
circle = plt.Circle((0, 0), radius, color="r", fill=False)
|
||||
plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], '.')
|
||||
circle = plt.Circle((0, 0), radius, color='r', fill=False)
|
||||
fig.axes[0].add_artist(circle)
|
||||
fig.axes[0].set_xlim((-radius * 1.1, radius * 1.1))
|
||||
fig.axes[0].set_ylim((-radius * 1.1, radius * 1.1))
|
||||
fig.axes[0].set_aspect("equal")
|
||||
fig.axes[0].set_xlim((-radius*1.1, radius*1.1))
|
||||
fig.axes[0].set_ylim((-radius*1.1, radius*1.1))
|
||||
fig.axes[0].set_aspect('equal')
|
||||
plt.draw()
|
||||
plt.pause(0.001)
|
||||
plt.show(block=False)
|
||||
|
@ -460,7 +395,7 @@ def kernel_point_optimization_debug(
|
|||
|
||||
# Remove unused lines in the saved gradients
|
||||
if step < 10000:
|
||||
saved_gradient_norms = saved_gradient_norms[: step + 1, :]
|
||||
saved_gradient_norms = saved_gradient_norms[:step+1, :]
|
||||
|
||||
# Rescale radius to fit the wanted ratio of radius
|
||||
r = np.sqrt(np.sum(np.power(kernel_points, 2), axis=-1))
|
||||
|
@ -471,8 +406,9 @@ def kernel_point_optimization_debug(
|
|||
|
||||
|
||||
def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
|
||||
|
||||
# Kernel directory
|
||||
kernel_dir = "kernels/dispositions"
|
||||
kernel_dir = 'kernels/dispositions'
|
||||
if not exists(kernel_dir):
|
||||
makedirs(kernel_dir)
|
||||
|
||||
|
@ -481,28 +417,26 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
|
|||
lloyd = True
|
||||
|
||||
# Kernel_file
|
||||
kernel_file = join(
|
||||
kernel_dir, "k_{:03d}_{:s}_{:d}D.ply".format(num_kpoints, fixed, dimension)
|
||||
)
|
||||
kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension))
|
||||
|
||||
# Check if already done
|
||||
if not exists(kernel_file):
|
||||
if lloyd:
|
||||
# Create kernels
|
||||
kernel_points = spherical_Lloyd(
|
||||
1.0, num_kpoints, dimension=dimension, fixed=fixed, verbose=0
|
||||
)
|
||||
kernel_points = spherical_Lloyd(1.0,
|
||||
num_kpoints,
|
||||
dimension=dimension,
|
||||
fixed=fixed,
|
||||
verbose=0)
|
||||
|
||||
else:
|
||||
# Create kernels
|
||||
kernel_points, grad_norms = kernel_point_optimization_debug(
|
||||
1.0,
|
||||
kernel_points, grad_norms = kernel_point_optimization_debug(1.0,
|
||||
num_kpoints,
|
||||
num_kernels=100,
|
||||
dimension=dimension,
|
||||
fixed=fixed,
|
||||
verbose=0,
|
||||
)
|
||||
verbose=0)
|
||||
|
||||
# Find best candidate
|
||||
best_k = np.argmin(grad_norms[-1, :])
|
||||
|
@ -510,23 +444,23 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
|
|||
# Save points
|
||||
kernel_points = kernel_points[best_k, :, :]
|
||||
|
||||
write_ply(kernel_file, kernel_points, ["x", "y", "z"])
|
||||
write_ply(kernel_file, kernel_points, ['x', 'y', 'z'])
|
||||
|
||||
else:
|
||||
data = read_ply(kernel_file)
|
||||
kernel_points = np.vstack((data["x"], data["y"], data["z"])).T
|
||||
kernel_points = np.vstack((data['x'], data['y'], data['z'])).T
|
||||
|
||||
# Random rotations for the kernel
|
||||
# N.B. 4D random rotations not supported yet
|
||||
R = np.eye(dimension)
|
||||
theta = np.random.rand() * 2 * np.pi
|
||||
if dimension == 2:
|
||||
if fixed != "vertical":
|
||||
if fixed != 'vertical':
|
||||
c, s = np.cos(theta), np.sin(theta)
|
||||
R = np.array([[c, -s], [s, c]], dtype=np.float32)
|
||||
|
||||
elif dimension == 3:
|
||||
if fixed != "vertical":
|
||||
if fixed != 'vertical':
|
||||
c, s = np.cos(theta), np.sin(theta)
|
||||
R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
|
||||
|
||||
|
@ -534,24 +468,18 @@ def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
|
|||
phi = (np.random.rand() - 0.5) * np.pi
|
||||
|
||||
# Create the first vector in Cartesian coordinates
|
||||
u = np.array(
|
||||
[np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)]
|
||||
)
|
||||
u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
|
||||
|
||||
# Choose a random rotation angle
|
||||
alpha = np.random.rand() * 2 * np.pi
|
||||
|
||||
# Create the rotation matrix with this vector and angle
|
||||
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[
|
||||
0
|
||||
]
|
||||
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0]
|
||||
|
||||
R = R.astype(np.float32)
|
||||
|
||||
# Add a small noise
|
||||
kernel_points = kernel_points + np.random.normal(
|
||||
scale=0.01, size=kernel_points.shape
|
||||
)
|
||||
kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape)
|
||||
|
||||
# Scale kernels
|
||||
kernel_points = radius * kernel_points
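Usage sketch for load_kernels, assuming it returns the rescaled kernel points built above; note that the first call for a given configuration creates and caches a .ply file in kernels/dispositions.

K_pts = load_kernels(radius=0.05, num_kpoints=15, dimension=3, fixed='center', lloyd=False)
print(K_pts.shape)    # expected (15, 3)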
|
||||
|
|
|
@ -19,17 +19,20 @@ import numpy as np
|
|||
|
||||
|
||||
def p2p_fitting_regularizer(net):
|
||||
|
||||
fitting_loss = 0
|
||||
repulsive_loss = 0
|
||||
|
||||
for m in net.modules():
|
||||
|
||||
if isinstance(m, KPConv) and m.deformable:
|
||||
|
||||
##############
|
||||
# Fitting loss
|
||||
##############
|
||||
|
||||
# Get the distance to the closest input point and normalize it to be independent of the layer
|
||||
KP_min_d2 = m.min_d2 / (m.KP_extent**2)
|
||||
KP_min_d2 = m.min_d2 / (m.KP_extent ** 2)
|
||||
|
||||
# Loss will be the square distance to closest input point. We use L1 because dist is already squared
|
||||
fitting_loss += net.l1(KP_min_d2, torch.zeros_like(KP_min_d2))
|
||||
|
@ -43,15 +46,9 @@ def p2p_fitting_regularizer(net):
|
|||
|
||||
# Point should not be close to each other
|
||||
for i in range(net.K):
|
||||
other_KP = torch.cat(
|
||||
[KP_locs[:, :i, :], KP_locs[:, i + 1 :, :]], dim=1
|
||||
).detach()
|
||||
distances = torch.sqrt(
|
||||
torch.sum((other_KP - KP_locs[:, i : i + 1, :]) ** 2, dim=2)
|
||||
)
|
||||
rep_loss = torch.sum(
|
||||
torch.clamp_max(distances - net.repulse_extent, max=0.0) ** 2, dim=1
|
||||
)
|
||||
other_KP = torch.cat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], dim=1).detach()
|
||||
distances = torch.sqrt(torch.sum((other_KP - KP_locs[:, i:i + 1, :]) ** 2, dim=2))
|
||||
rep_loss = torch.sum(torch.clamp_max(distances - net.repulse_extent, max=0.0) ** 2, dim=1)
|
||||
repulsive_loss += net.l1(rep_loss, torch.zeros_like(rep_loss)) / net.K
|
||||
|
||||
return net.deform_fitting_power * (2 * fitting_loss + repulsive_loss)
|
||||
|
@ -82,32 +79,36 @@ class KPCNN(nn.Module):
|
|||
# Loop over consecutive blocks
|
||||
block_in_layer = 0
|
||||
for block_i, block in enumerate(config.architecture):
|
||||
|
||||
# Check equivariance
|
||||
if ("equivariant" in block) and (not out_dim % 3 == 0):
|
||||
raise ValueError(
|
||||
"Equivariant block but features dimension is not a factor of 3"
|
||||
)
|
||||
if ('equivariant' in block) and (not out_dim % 3 == 0):
|
||||
raise ValueError('Equivariant block but features dimension is not a factor of 3')
|
||||
|
||||
# Detect upsampling block to stop
|
||||
if "upsample" in block:
|
||||
if 'upsample' in block:
|
||||
break
|
||||
|
||||
# Apply the good block function defining tf ops
|
||||
self.block_ops.append(
|
||||
block_decider(block, r, in_dim, out_dim, layer, config)
|
||||
)
|
||||
self.block_ops.append(block_decider(block,
|
||||
r,
|
||||
in_dim,
|
||||
out_dim,
|
||||
layer,
|
||||
config))
|
||||
|
||||
|
||||
# Index of block in this layer
|
||||
block_in_layer += 1
|
||||
|
||||
# Update dimension of input from output
|
||||
if "simple" in block:
|
||||
if 'simple' in block:
|
||||
in_dim = out_dim // 2
|
||||
else:
|
||||
in_dim = out_dim
|
||||
|
||||
|
||||
# Detect change to a subsampled layer
|
||||
if "pool" in block or "strided" in block:
|
||||
if 'pool' in block or 'strided' in block:
|
||||
# Update radius and feature dimension for next layer
|
||||
layer += 1
|
||||
r *= 2
|
||||
|
@ -133,6 +134,7 @@ class KPCNN(nn.Module):
|
|||
return
|
||||
|
||||
def forward(self, batch, config):
|
||||
|
||||
# Save all block operations in a list of modules
|
||||
x = batch.features.clone().detach()
|
||||
|
||||
|
@ -158,12 +160,12 @@ class KPCNN(nn.Module):
|
|||
self.output_loss = self.criterion(outputs, labels)
|
||||
|
||||
# Regularization of deformable offsets
|
||||
if self.deform_fitting_mode == "point2point":
|
||||
if self.deform_fitting_mode == 'point2point':
|
||||
self.reg_loss = p2p_fitting_regularizer(self)
|
||||
elif self.deform_fitting_mode == "point2plane":
|
||||
raise ValueError("point2plane fitting mode not implemented yet.")
|
||||
elif self.deform_fitting_mode == 'point2plane':
|
||||
raise ValueError('point2plane fitting mode not implemented yet.')
|
||||
else:
|
||||
raise ValueError("Unknown fitting mode: " + self.deform_fitting_mode)
|
||||
raise ValueError('Unknown fitting mode: ' + self.deform_fitting_mode)
|
||||
|
||||
# Combined loss
|
||||
return self.output_loss + self.reg_loss
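A hedged training-step sketch built around the loss above; the batch.labels attribute and the optimizer are assumptions, while the forward and loss signatures follow the methods visible in this file.

def train_step(net, optimizer, batch, config):
    # One optimization step: forward pass, combined loss (output loss plus
    # deformable regularization), backward pass and parameter update.
    outputs = net(batch, config)
    loss = net.loss(outputs, batch.labels)    # batch.labels is an assumption
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()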
|
||||
|
@ -215,36 +217,36 @@ class KPFCNN(nn.Module):
|
|||
|
||||
# Loop over consecutive blocks
|
||||
for block_i, block in enumerate(config.architecture):
|
||||
|
||||
# Check equivariance
|
||||
if ("equivariant" in block) and (not out_dim % 3 == 0):
|
||||
raise ValueError(
|
||||
"Equivariant block but features dimension is not a factor of 3"
|
||||
)
|
||||
if ('equivariant' in block) and (not out_dim % 3 == 0):
|
||||
raise ValueError('Equivariant block but features dimension is not a factor of 3')
|
||||
|
||||
# Detect change to next layer for skip connection
|
||||
if np.any(
|
||||
[tmp in block for tmp in ["pool", "strided", "upsample", "global"]]
|
||||
):
|
||||
if np.any([tmp in block for tmp in ['pool', 'strided', 'upsample', 'global']]):
|
||||
self.encoder_skips.append(block_i)
|
||||
self.encoder_skip_dims.append(in_dim)
|
||||
|
||||
# Detect upsampling block to stop
|
||||
if "upsample" in block:
|
||||
if 'upsample' in block:
|
||||
break
|
||||
|
||||
# Apply the good block function defining tf ops
|
||||
self.encoder_blocks.append(
|
||||
block_decider(block, r, in_dim, out_dim, layer, config)
|
||||
)
|
||||
self.encoder_blocks.append(block_decider(block,
|
||||
r,
|
||||
in_dim,
|
||||
out_dim,
|
||||
layer,
|
||||
config))
|
||||
|
||||
# Update dimension of input from output
|
||||
if "simple" in block:
|
||||
if 'simple' in block:
|
||||
in_dim = out_dim // 2
|
||||
else:
|
||||
in_dim = out_dim
|
||||
|
||||
# Detect change to a subsampled layer
|
||||
if "pool" in block or "strided" in block:
|
||||
if 'pool' in block or 'strided' in block:
|
||||
# Update radius and feature dimension for next layer
|
||||
layer += 1
|
||||
r *= 2
|
||||
|
@ -261,36 +263,38 @@ class KPFCNN(nn.Module):
|
|||
# Find first upsampling block
|
||||
start_i = 0
|
||||
for block_i, block in enumerate(config.architecture):
|
||||
if "upsample" in block:
|
||||
if 'upsample' in block:
|
||||
start_i = block_i
|
||||
break
|
||||
|
||||
# Loop over consecutive blocks
|
||||
for block_i, block in enumerate(config.architecture[start_i:]):
|
||||
|
||||
# Add dimension of skip connection concat
|
||||
if block_i > 0 and "upsample" in config.architecture[start_i + block_i - 1]:
|
||||
if block_i > 0 and 'upsample' in config.architecture[start_i + block_i - 1]:
|
||||
in_dim += self.encoder_skip_dims[layer]
|
||||
self.decoder_concats.append(block_i)
|
||||
|
||||
# Apply the good block function defining tf ops
|
||||
self.decoder_blocks.append(
|
||||
block_decider(block, r, in_dim, out_dim, layer, config)
|
||||
)
|
||||
self.decoder_blocks.append(block_decider(block,
|
||||
r,
|
||||
in_dim,
|
||||
out_dim,
|
||||
layer,
|
||||
config))
|
||||
|
||||
# Update dimension of input from output
|
||||
in_dim = out_dim
|
||||
|
||||
# Detect change to a subsampled layer
|
||||
if "upsample" in block:
|
||||
if 'upsample' in block:
|
||||
# Update radius and feature dimension for next layer
|
||||
layer -= 1
|
||||
r *= 0.5
|
||||
out_dim = out_dim // 2
|
||||
|
||||
self.head_mlp = UnaryBlock(out_dim, config.first_features_dim, False, 0)
|
||||
self.head_softmax = UnaryBlock(
|
||||
config.first_features_dim, self.C, False, 0, no_relu=True
|
||||
)
|
||||
self.head_softmax = UnaryBlock(config.first_features_dim, self.C, False, 0, no_relu=True)
|
||||
|
||||
################
|
||||
# Network Losses
|
||||
|
@ -316,6 +320,7 @@ class KPFCNN(nn.Module):
|
|||
return
|
||||
|
||||
def forward(self, batch, config):
|
||||
|
||||
# Get input features
|
||||
x = batch.features.clone().detach()
|
||||
|
||||
|
@ -346,7 +351,7 @@ class KPFCNN(nn.Module):
|
|||
"""
|
||||
|
||||
# Set all ignored labels to -1 and correct the other label to be in [0, C-1] range
|
||||
target = -torch.ones_like(labels)
|
||||
target = - torch.ones_like(labels)
|
||||
for i, c in enumerate(self.valid_labels):
|
||||
target[labels == c] = i
|
||||
|
||||
|
@ -359,12 +364,12 @@ class KPFCNN(nn.Module):
|
|||
self.output_loss = self.criterion(outputs, target)
|
||||
|
||||
# Regularization of deformable offsets
|
||||
if self.deform_fitting_mode == "point2point":
|
||||
if self.deform_fitting_mode == 'point2point':
|
||||
self.reg_loss = p2p_fitting_regularizer(self)
|
||||
elif self.deform_fitting_mode == "point2plane":
|
||||
raise ValueError("point2plane fitting mode not implemented yet.")
|
||||
elif self.deform_fitting_mode == 'point2plane':
|
||||
raise ValueError('point2plane fitting mode not implemented yet.')
|
||||
else:
|
||||
raise ValueError("Unknown fitting mode: " + self.deform_fitting_mode)
|
||||
raise ValueError('Unknown fitting mode: ' + self.deform_fitting_mode)
|
||||
|
||||
# Combined loss
|
||||
return self.output_loss + self.reg_loss
|
||||
|
@ -378,7 +383,7 @@ class KPFCNN(nn.Module):
|
|||
"""
|
||||
|
||||
# Set all ignored labels to -1 and correct the other label to be in [0, C-1] range
|
||||
target = -torch.ones_like(labels)
|
||||
target = - torch.ones_like(labels)
|
||||
for i, c in enumerate(self.valid_labels):
|
||||
target[labels == c] = i
|
||||
|
||||
|
@ -387,3 +392,24 @@ class KPFCNN(nn.Module):
|
|||
correct = (predicted == target).sum().item()
|
||||
|
||||
return correct / total
|
|
||||
|
|
255
models/blocks.py
|
@ -15,6 +15,7 @@
|
|||
#
|
||||
|
||||
|
||||
import time
|
||||
import math
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
@ -22,6 +23,7 @@ from torch.nn.parameter import Parameter
|
|||
from torch.nn.init import kaiming_uniform_
|
||||
from kernels.kernel_points import load_kernels
|
||||
|
||||
from utils.ply import write_ply
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------
|
||||
#
|
||||
|
@ -49,19 +51,19 @@ def gather(x, idx, method=2):
|
|||
return x.gather(0, idx)
|
||||
elif method == 2:
|
||||
for i, ni in enumerate(idx.size()[1:]):
|
||||
x = x.unsqueeze(i + 1)
|
||||
x = x.unsqueeze(i+1)
|
||||
new_s = list(x.size())
|
||||
new_s[i + 1] = ni
|
||||
new_s[i+1] = ni
|
||||
x = x.expand(new_s)
|
||||
n = len(idx.size())
|
||||
for i, di in enumerate(x.size()[n:]):
|
||||
idx = idx.unsqueeze(i + n)
|
||||
idx = idx.unsqueeze(i+n)
|
||||
new_s = list(idx.size())
|
||||
new_s[i + n] = di
|
||||
new_s[i+n] = di
|
||||
idx = idx.expand(new_s)
|
||||
return x.gather(0, idx)
|
||||
else:
|
||||
raise ValueError("Unknown method")
|
||||
raise ValueError('Unknown method')
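Minimal usage sketch for the gather helper above, on the typical neighbor-indexing case (stacked point features indexed by a 2-D neighbor matrix):

x = torch.arange(12, dtype=torch.float32).reshape(6, 2)    # (N, D) stacked features
idx = torch.tensor([[0, 1], [2, 3], [4, 5]])               # (M, K) neighbor indices
out = gather(x, idx)                                        # default broadcasting method
print(out.shape)                                            # torch.Size([3, 2, 2]) = (M, K, D)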
|
||||
|
||||
|
||||
def radius_gaussian(sq_r, sig, eps=1e-9):
|
||||
|
@ -120,8 +122,9 @@ def global_average(x, batch_lengths):
|
|||
averaged_features = []
|
||||
i0 = 0
|
||||
for b_i, length in enumerate(batch_lengths):
|
||||
|
||||
# Average features for each batch cloud
|
||||
averaged_features.append(torch.mean(x[i0 : i0 + length], dim=0))
|
||||
averaged_features.append(torch.mean(x[i0:i0 + length], dim=0))
|
||||
|
||||
# Increment for next cloud
|
||||
i0 += length
|
||||
|
@ -138,20 +141,10 @@ def global_average(x, batch_lengths):
|
|||
|
||||
|
||||
class KPConv(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
kernel_size,
|
||||
p_dim,
|
||||
in_channels,
|
||||
out_channels,
|
||||
KP_extent,
|
||||
radius,
|
||||
fixed_kernel_points="center",
|
||||
KP_influence="linear",
|
||||
aggregation_mode="sum",
|
||||
deformable=False,
|
||||
modulated=False,
|
||||
):
|
||||
|
||||
def __init__(self, kernel_size, p_dim, in_channels, out_channels, KP_extent, radius,
|
||||
fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum',
|
||||
deformable=False, modulated=False):
|
||||
"""
|
||||
Initialize parameters for KPConvDeformable.
|
||||
:param kernel_size: Number of kernel points.
|
||||
|
@ -187,10 +180,8 @@ class KPConv(nn.Module):
|
|||
self.offset_features = None
|
||||
|
||||
# Initialize weights
|
||||
self.weights = Parameter(
|
||||
torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),
|
||||
requires_grad=True,
|
||||
)
|
||||
self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),
|
||||
requires_grad=True)
|
||||
|
||||
# Initiate weights for offsets
|
||||
if deformable:
|
||||
|
@ -198,8 +189,7 @@ class KPConv(nn.Module):
|
|||
self.offset_dim = (self.p_dim + 1) * self.K
|
||||
else:
|
||||
self.offset_dim = self.p_dim * self.K
|
||||
self.offset_conv = KPConv(
|
||||
self.K,
|
||||
self.offset_conv = KPConv(self.K,
|
||||
self.p_dim,
|
||||
self.in_channels,
|
||||
self.offset_dim,
|
||||
|
@ -207,11 +197,8 @@ class KPConv(nn.Module):
|
|||
radius,
|
||||
fixed_kernel_points=fixed_kernel_points,
|
||||
KP_influence=KP_influence,
|
||||
aggregation_mode=aggregation_mode,
|
||||
)
|
||||
self.offset_bias = Parameter(
|
||||
torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True
|
||||
)
|
||||
aggregation_mode=aggregation_mode)
|
||||
self.offset_bias = Parameter(torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True)
|
||||
|
||||
else:
|
||||
self.offset_dim = None
|
||||
|
@ -239,36 +226,36 @@ class KPConv(nn.Module):
|
|||
"""
|
||||
|
||||
# Create one kernel disposition (as numpy array). Choose the KP distance to center thanks to the KP extent
|
||||
K_points_numpy = load_kernels(
|
||||
self.radius, self.K, dimension=self.p_dim, fixed=self.fixed_kernel_points
|
||||
)
|
||||
K_points_numpy = load_kernels(self.radius,
|
||||
self.K,
|
||||
dimension=self.p_dim,
|
||||
fixed=self.fixed_kernel_points)
|
||||
|
||||
return Parameter(
|
||||
torch.tensor(K_points_numpy, dtype=torch.float32), requires_grad=False
|
||||
)
|
||||
return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32),
|
||||
requires_grad=False)
|
||||
|
||||
def forward(self, q_pts, s_pts, neighb_inds, x):
|
||||
|
||||
###################
|
||||
# Offset generation
|
||||
###################
|
||||
|
||||
if self.deformable:
|
||||
|
||||
# Get offsets with a KPConv that only takes part of the features
|
||||
self.offset_features = (
|
||||
self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias
|
||||
)
|
||||
self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias
|
||||
|
||||
if self.modulated:
|
||||
|
||||
# Get offset (in normalized scale) from features
|
||||
unscaled_offsets = self.offset_features[:, : self.p_dim * self.K]
|
||||
unscaled_offsets = self.offset_features[:, :self.p_dim * self.K]
|
||||
unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim)
|
||||
|
||||
# Get modulations
|
||||
modulations = 2 * torch.sigmoid(
|
||||
self.offset_features[:, self.p_dim * self.K :]
|
||||
)
|
||||
modulations = 2 * torch.sigmoid(self.offset_features[:, self.p_dim * self.K:])
|
||||
|
||||
else:
|
||||
|
||||
# Get offset (in normalized scale) from features
|
||||
unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim)
|
||||
|
||||
|
@ -307,25 +294,22 @@ class KPConv(nn.Module):
|
|||
differences = neighbors - deformed_K_points
|
||||
|
||||
# Get the square distances [n_points, n_neighbors, n_kpoints]
|
||||
sq_distances = torch.sum(differences**2, dim=3)
|
||||
sq_distances = torch.sum(differences ** 2, dim=3)
|
||||
|
||||
# Optimization by ignoring points outside a deformed KP range
|
||||
if self.deformable:
|
||||
|
||||
# Save distances for loss
|
||||
self.min_d2, _ = torch.min(sq_distances, dim=1)
|
||||
|
||||
# Boolean of the neighbors in range of a kernel point [n_points, n_neighbors]
|
||||
in_range = torch.any(sq_distances < self.KP_extent**2, dim=2).type(
|
||||
torch.int32
|
||||
)
|
||||
in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32)
|
||||
|
||||
# New value of max neighbors
|
||||
new_max_neighb = torch.max(torch.sum(in_range, dim=1))
|
||||
|
||||
# For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb]
|
||||
neighb_row_bool, neighb_row_inds = torch.topk(
|
||||
in_range, new_max_neighb.item(), dim=1
|
||||
)
|
||||
neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1)
|
||||
|
||||
# Gather new neighbor indices [n_points, new_max_neighb]
|
||||
new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False)
|
||||
|
@ -337,41 +321,35 @@ class KPConv(nn.Module):
|
|||
|
||||
# New shadow neighbors have to point to the last shadow point
|
||||
new_neighb_inds *= neighb_row_bool
|
||||
new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(
|
||||
s_pts.shape[0] - 1
|
||||
)
|
||||
new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1)
|
||||
else:
|
||||
new_neighb_inds = neighb_inds
|
||||
|
||||
# Get Kernel point influences [n_points, n_kpoints, n_neighbors]
|
||||
if self.KP_influence == "constant":
|
||||
if self.KP_influence == 'constant':
|
||||
# Every point get an influence of 1.
|
||||
all_weights = torch.ones_like(sq_distances)
|
||||
all_weights = torch.transpose(all_weights, 1, 2)
|
||||
|
||||
elif self.KP_influence == "linear":
|
||||
elif self.KP_influence == 'linear':
|
||||
# Influence decrease linearly with the distance, and get to zero when d = KP_extent.
|
||||
all_weights = torch.clamp(
|
||||
1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0
|
||||
)
|
||||
all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0)
|
||||
all_weights = torch.transpose(all_weights, 1, 2)
|
||||
|
||||
elif self.KP_influence == "gaussian":
|
||||
elif self.KP_influence == 'gaussian':
|
||||
# Influence in gaussian of the distance.
|
||||
sigma = self.KP_extent * 0.3
|
||||
all_weights = radius_gaussian(sq_distances, sigma)
|
||||
all_weights = torch.transpose(all_weights, 1, 2)
|
||||
else:
|
||||
raise ValueError("Unknown influence function type (config.KP_influence)")
|
||||
raise ValueError('Unknown influence function type (config.KP_influence)')
|
||||
|
||||
# In case of closest mode, only the closest KP can influence each point
|
||||
if self.aggregation_mode == "closest":
|
||||
if self.aggregation_mode == 'closest':
|
||||
neighbors_1nn = torch.argmin(sq_distances, dim=2)
|
||||
all_weights *= torch.transpose(
|
||||
nn.functional.one_hot(neighbors_1nn, self.K), 1, 2
|
||||
)
|
||||
all_weights *= torch.transpose(nn.functional.one_hot(neighbors_1nn, self.K), 1, 2)
|
||||
|
||||
elif self.aggregation_mode != "sum":
|
||||
elif self.aggregation_mode != 'sum':
|
||||
raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
|
||||
|
||||
# Add a zero feature for shadow neighbors
|
||||
|
@ -395,10 +373,9 @@ class KPConv(nn.Module):
|
|||
return torch.sum(kernel_outputs, dim=0)
|
||||
|
||||
def __repr__(self):
|
||||
return "KPConv(radius: {:.2f}, in_feat: {:d}, out_feat: {:d})".format(
|
||||
self.radius, self.in_channels, self.out_channels
|
||||
)
|
||||
|
||||
return 'KPConv(radius: {:.2f}, in_feat: {:d}, out_feat: {:d})'.format(self.radius,
|
||||
self.in_channels,
|
||||
self.out_channels)
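An instantiation sketch for the layer above; the numeric values are illustrative, not repository defaults, and constructing the layer triggers load_kernels, which may create a kernel file on disk.

conv = KPConv(kernel_size=15,
              p_dim=3,
              in_channels=64,
              out_channels=128,
              KP_extent=0.048,     # illustrative, in the same units as the points
              radius=0.10,         # illustrative convolution radius
              fixed_kernel_points='center',
              KP_influence='linear',
              aggregation_mode='sum',
              deformable=False,
              modulated=False)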
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------
|
||||
#
|
||||
|
@ -406,55 +383,51 @@ class KPConv(nn.Module):
|
|||
# \********************/
|
||||
#
|
||||
|
||||
def block_decider(block_name,
|
||||
radius,
|
||||
in_dim,
|
||||
out_dim,
|
||||
layer_ind,
|
||||
config):
|
||||
|
||||
def block_decider(block_name, radius, in_dim, out_dim, layer_ind, config):
|
||||
if block_name == "unary":
|
||||
return UnaryBlock(
|
||||
in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum
|
||||
)
|
||||
if block_name == 'unary':
|
||||
return UnaryBlock(in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum)
|
||||
|
||||
elif block_name in [
|
||||
"simple",
|
||||
"simple_deformable",
|
||||
"simple_invariant",
|
||||
"simple_equivariant",
|
||||
"simple_strided",
|
||||
"simple_deformable_strided",
|
||||
"simple_invariant_strided",
|
||||
"simple_equivariant_strided",
|
||||
]:
|
||||
elif block_name in ['simple',
|
||||
'simple_deformable',
|
||||
'simple_invariant',
|
||||
'simple_equivariant',
|
||||
'simple_strided',
|
||||
'simple_deformable_strided',
|
||||
'simple_invariant_strided',
|
||||
'simple_equivariant_strided']:
|
||||
return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
|
||||
|
||||
elif block_name in [
|
||||
"resnetb",
|
||||
"resnetb_invariant",
|
||||
"resnetb_equivariant",
|
||||
"resnetb_deformable",
|
||||
"resnetb_strided",
|
||||
"resnetb_deformable_strided",
|
||||
"resnetb_equivariant_strided",
|
||||
"resnetb_invariant_strided",
|
||||
]:
|
||||
return ResnetBottleneckBlock(
|
||||
block_name, in_dim, out_dim, radius, layer_ind, config
|
||||
)
|
||||
elif block_name in ['resnetb',
|
||||
'resnetb_invariant',
|
||||
'resnetb_equivariant',
|
||||
'resnetb_deformable',
|
||||
'resnetb_strided',
|
||||
'resnetb_deformable_strided',
|
||||
'resnetb_equivariant_strided',
|
||||
'resnetb_invariant_strided']:
|
||||
return ResnetBottleneckBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
|
||||
|
||||
elif block_name == "max_pool" or block_name == "max_pool_wide":
|
||||
elif block_name == 'max_pool' or block_name == 'max_pool_wide':
|
||||
return MaxPoolBlock(layer_ind)
|
||||
|
||||
elif block_name == "global_average":
|
||||
elif block_name == 'global_average':
|
||||
return GlobalAverageBlock()
|
||||
|
||||
elif block_name == "nearest_upsample":
|
||||
elif block_name == 'nearest_upsample':
|
||||
return NearestUpsampleBlock(layer_ind)
|
||||
|
||||
else:
|
||||
raise ValueError(
|
||||
"Unknown block name in the architecture definition : " + block_name
|
||||
)
|
||||
raise ValueError('Unknown block name in the architecture definition : ' + block_name)
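For reference, an illustrative architecture list in the format consumed by block_decider (classification-style, ending with a global pooling block); the real lists live in the training configuration files.

architecture = ['simple',
                'resnetb',
                'resnetb_strided',
                'resnetb',
                'resnetb_strided',
                'resnetb_deformable',
                'global_average']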
|
||||
|
||||
|
||||
class BatchNormBlock(nn.Module):
|
||||
|
||||
def __init__(self, in_dim, use_bn, bn_momentum):
|
||||
"""
|
||||
Initialize a batch normalization block. If network does not use batch normalization, replace with biases.
|
||||
|
@ -468,11 +441,9 @@ class BatchNormBlock(nn.Module):
|
|||
self.in_dim = in_dim
|
||||
if self.use_bn:
|
||||
self.batch_norm = nn.BatchNorm1d(in_dim, momentum=bn_momentum)
|
||||
# self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
|
||||
#self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
|
||||
else:
|
||||
self.bias = Parameter(
|
||||
torch.zeros(in_dim, dtype=torch.float32), requires_grad=True
|
||||
)
|
||||
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True)
|
||||
return
|
||||
|
||||
def reset_parameters(self):
|
||||
|
@ -480,6 +451,7 @@ class BatchNormBlock(nn.Module):
|
|||
|
||||
def forward(self, x):
|
||||
if self.use_bn:
|
||||
|
||||
x = x.unsqueeze(2)
|
||||
x = x.transpose(0, 2)
|
||||
x = self.batch_norm(x)
|
||||
|
@ -489,14 +461,13 @@ class BatchNormBlock(nn.Module):
|
|||
return x + self.bias
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})".format(
|
||||
self.in_dim, self.bn_momentum, str(not self.use_bn)
|
||||
)
|
||||
)
|
||||
return 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'.format(self.in_dim,
|
||||
self.bn_momentum,
|
||||
str(not self.use_bn))
|
||||
|
||||
|
||||
class UnaryBlock(nn.Module):
|
||||
|
||||
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
|
||||
"""
|
||||
Initialize a standard unary block with its ReLU and BatchNorm.
|
||||
|
@ -526,12 +497,14 @@ class UnaryBlock(nn.Module):
|
|||
return x
|
||||
|
||||
def __repr__(self):
|
||||
return "UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})".format(
|
||||
self.in_dim, self.out_dim, str(self.use_bn), str(not self.no_relu)
|
||||
)
|
||||
return 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'.format(self.in_dim,
|
||||
self.out_dim,
|
||||
str(self.use_bn),
|
||||
str(not self.no_relu))
|
||||
|
||||
|
||||
class SimpleBlock(nn.Module):
|
||||
|
||||
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
|
||||
"""
|
||||
Initialize a simple convolution block with its ReLU and BatchNorm.
|
||||
|
@ -554,8 +527,7 @@ class SimpleBlock(nn.Module):
|
|||
self.out_dim = out_dim
|
||||
|
||||
# Define the KPConv class
|
||||
self.KPConv = KPConv(
|
||||
config.num_kernel_points,
|
||||
self.KPConv = KPConv(config.num_kernel_points,
|
||||
config.in_points_dim,
|
||||
in_dim,
|
||||
out_dim // 2,
|
||||
|
@ -564,9 +536,8 @@ class SimpleBlock(nn.Module):
|
|||
fixed_kernel_points=config.fixed_kernel_points,
|
||||
KP_influence=config.KP_influence,
|
||||
aggregation_mode=config.aggregation_mode,
|
||||
deformable="deform" in block_name,
|
||||
modulated=config.modulated,
|
||||
)
|
||||
deformable='deform' in block_name,
|
||||
modulated=config.modulated)
|
||||
|
||||
# Other operations
|
||||
self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum)
|
||||
|
@ -575,7 +546,8 @@ class SimpleBlock(nn.Module):
|
|||
return
|
||||
|
||||
def forward(self, x, batch):
|
||||
if "strided" in self.block_name:
|
||||
|
||||
if 'strided' in self.block_name:
|
||||
q_pts = batch.points[self.layer_ind + 1]
|
||||
s_pts = batch.points[self.layer_ind]
|
||||
neighb_inds = batch.pools[self.layer_ind]
|
||||
|
@ -589,6 +561,7 @@ class SimpleBlock(nn.Module):
|
|||
|
||||
|
||||
class ResnetBottleneckBlock(nn.Module):
|
||||
|
||||
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
|
||||
"""
|
||||
Initialize a resnet bottleneck block.
|
||||
|
@ -612,15 +585,12 @@ class ResnetBottleneckBlock(nn.Module):
|
|||
|
||||
# First downscaling mlp
|
||||
if in_dim != out_dim // 4:
|
||||
self.unary1 = UnaryBlock(
|
||||
in_dim, out_dim // 4, self.use_bn, self.bn_momentum
|
||||
)
|
||||
self.unary1 = UnaryBlock(in_dim, out_dim // 4, self.use_bn, self.bn_momentum)
|
||||
else:
|
||||
self.unary1 = nn.Identity()
|
||||
|
||||
# KPConv block
|
||||
self.KPConv = KPConv(
|
||||
config.num_kernel_points,
|
||||
self.KPConv = KPConv(config.num_kernel_points,
|
||||
config.in_points_dim,
|
||||
out_dim // 4,
|
||||
out_dim // 4,
|
||||
|
@ -629,23 +599,16 @@ class ResnetBottleneckBlock(nn.Module):
|
|||
fixed_kernel_points=config.fixed_kernel_points,
|
||||
KP_influence=config.KP_influence,
|
||||
aggregation_mode=config.aggregation_mode,
|
||||
deformable="deform" in block_name,
|
||||
modulated=config.modulated,
|
||||
)
|
||||
self.batch_norm_conv = BatchNormBlock(
|
||||
out_dim // 4, self.use_bn, self.bn_momentum
|
||||
)
|
||||
deformable='deform' in block_name,
|
||||
modulated=config.modulated)
|
||||
self.batch_norm_conv = BatchNormBlock(out_dim // 4, self.use_bn, self.bn_momentum)
|
||||
|
||||
# Second upscaling mlp
|
||||
self.unary2 = UnaryBlock(
|
||||
out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True
|
||||
)
|
||||
self.unary2 = UnaryBlock(out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
|
||||
|
||||
# Shortcut optional mlp
|
||||
if in_dim != out_dim:
|
||||
self.unary_shortcut = UnaryBlock(
|
||||
in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True
|
||||
)
|
||||
self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
|
||||
else:
|
||||
self.unary_shortcut = nn.Identity()
|
||||
|
||||
|
@ -655,7 +618,8 @@ class ResnetBottleneckBlock(nn.Module):
|
|||
return
|
||||
|
||||
def forward(self, features, batch):
|
||||
if "strided" in self.block_name:
|
||||
|
||||
if 'strided' in self.block_name:
|
||||
q_pts = batch.points[self.layer_ind + 1]
|
||||
s_pts = batch.points[self.layer_ind]
|
||||
neighb_inds = batch.pools[self.layer_ind]
|
||||
|
@ -675,7 +639,7 @@ class ResnetBottleneckBlock(nn.Module):
|
|||
x = self.unary2(x)
|
||||
|
||||
# Shortcut
|
||||
if "strided" in self.block_name:
|
||||
if 'strided' in self.block_name:
|
||||
shortcut = max_pool(features, neighb_inds)
|
||||
else:
|
||||
shortcut = features
|
||||
|
@ -685,6 +649,7 @@ class ResnetBottleneckBlock(nn.Module):
|
|||
|
||||
|
||||
class GlobalAverageBlock(nn.Module):
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Initialize a global average block with its ReLU and BatchNorm.
|
||||
|
@ -697,6 +662,7 @@ class GlobalAverageBlock(nn.Module):
|
|||
|
||||
|
||||
class NearestUpsampleBlock(nn.Module):
|
||||
|
||||
def __init__(self, layer_ind):
|
||||
"""
|
||||
Initialize a nearest upsampling block with its ReLU and BatchNorm.
|
||||
|
@ -709,12 +675,12 @@ class NearestUpsampleBlock(nn.Module):
|
|||
return closest_pool(x, batch.upsamples[self.layer_ind - 1])
|
||||
|
||||
def __repr__(self):
|
||||
return "NearestUpsampleBlock(layer: {:d} -> {:d})".format(
|
||||
self.layer_ind, self.layer_ind - 1
|
||||
)
|
||||
return 'NearestUpsampleBlock(layer: {:d} -> {:d})'.format(self.layer_ind,
|
||||
self.layer_ind - 1)
|
||||
|
||||
|
||||
class MaxPoolBlock(nn.Module):
|
||||
|
||||
def __init__(self, layer_ind):
|
||||
"""
|
||||
Initialize a max pooling block with its ReLU and BatchNorm.
|
||||
|
@ -725,3 +691,4 @@ class MaxPoolBlock(nn.Module):
|
|||
|
||||
def forward(self, x, batch):
|
||||
return max_pool(x, batch.pools[self.layer_ind + 1])
|
||||
|
||||
|
|
|
@ -22,11 +22,14 @@
|
|||
#
|
||||
|
||||
# Common libs
|
||||
import os
|
||||
import torch
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from os.path import isfile, join, exists
|
||||
from os import listdir, remove
|
||||
from os import listdir, remove, getcwd
|
||||
from sklearn.metrics import confusion_matrix
|
||||
import time
|
||||
|
||||
# My libs
|
||||
from utils.config import Config
|
||||
|
@ -34,8 +37,9 @@ from utils.metrics import IoU_from_confusions, smooth_metrics, fast_confusion
|
|||
from utils.ply import read_ply
|
||||
|
||||
# Datasets
|
||||
from datasetss.S3DIS import S3DISDataset
|
||||
from datasetss.SemanticKitti import SemanticKittiDataset
|
||||
from datasets.ModelNet40 import ModelNet40Dataset
|
||||
from datasets.S3DIS import S3DISDataset
|
||||
from datasets.SemanticKitti import SemanticKittiDataset
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------
|
||||
#
|
||||
|
@ -43,8 +47,8 @@ from datasetss.SemanticKitti import SemanticKittiDataset
|
|||
# \***********************/
|
||||
#
|
||||
|
||||
|
||||
def listdir_str(path):
|
||||
|
||||
# listdir can sometimes return a binary string instead of a decoded string.
|
||||
# This function ensures a steady behavior
|
||||
|
||||
|
@ -59,39 +63,41 @@ def listdir_str(path):
|
|||
return f_list
|
||||
|
||||
|
||||
|
||||
def running_mean(signal, n, axis=0, stride=1):
|
||||
signal = np.array(signal)
|
||||
torch_conv = torch.nn.Conv1d(1, 1, kernel_size=2 * n + 1, stride=stride, bias=False)
|
||||
torch_conv = torch.nn.Conv1d(1, 1, kernel_size=2*n+1, stride=stride, bias=False)
|
||||
torch_conv.weight.requires_grad_(False)
|
||||
torch_conv.weight *= 0
|
||||
torch_conv.weight += 1 / (2 * n + 1)
|
||||
torch_conv.weight += 1 / (2*n+1)
|
||||
if signal.ndim == 1:
|
||||
torch_signal = torch.from_numpy(signal.reshape([1, 1, -1]).astype(np.float32))
|
||||
return torch_conv(torch_signal).squeeze().numpy()
|
||||
|
||||
elif signal.ndim == 2:
|
||||
print("TODO implement with torch and stride here")
|
||||
print('TODO implement with torch and stride here')
|
||||
smoothed = np.empty(signal.shape)
|
||||
if axis == 0:
|
||||
for i, sig in enumerate(signal):
|
||||
sig_sum = np.convolve(sig, np.ones((2 * n + 1,)), mode="same")
|
||||
sig_num = np.convolve(sig * 0 + 1, np.ones((2 * n + 1,)), mode="same")
|
||||
sig_sum = np.convolve(sig, np.ones((2*n+1,)), mode='same')
|
||||
sig_num = np.convolve(sig*0+1, np.ones((2*n+1,)), mode='same')
|
||||
smoothed[i, :] = sig_sum / sig_num
|
||||
elif axis == 1:
|
||||
for i, sig in enumerate(signal.T):
|
||||
sig_sum = np.convolve(sig, np.ones((2 * n + 1,)), mode="same")
|
||||
sig_num = np.convolve(sig * 0 + 1, np.ones((2 * n + 1,)), mode="same")
|
||||
sig_sum = np.convolve(sig, np.ones((2*n+1,)), mode='same')
|
||||
sig_num = np.convolve(sig*0+1, np.ones((2*n+1,)), mode='same')
|
||||
smoothed[:, i] = sig_sum / sig_num
|
||||
else:
|
||||
print("wrong axis")
|
||||
print('wrong axis')
|
||||
return smoothed
|
||||
|
||||
else:
|
||||
print("wrong dimensions")
|
||||
print('wrong dimensions')
|
||||
return None
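Usage sketch for running_mean above: smooth a noisy 1-D signal with a window of 2*n+1 samples; the underlying Conv1d has no padding, so the output is 2*n samples shorter than the input.

sig = np.sin(np.linspace(0, 6, 200)) + 0.1 * np.random.randn(200)
smoothed = running_mean(sig, n=5)
print(sig.shape, smoothed.shape)    # (200,) (190,)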
|
||||
|
||||
|
||||
def IoU_class_metrics(all_IoUs, smooth_n):
|
||||
|
||||
# Get mean IoU per class for consecutive epochs to directly get a mean without further smoothing
|
||||
smoothed_IoUs = []
|
||||
for epoch in range(len(all_IoUs)):
|
||||
|
@ -105,7 +111,8 @@ def IoU_class_metrics(all_IoUs, smooth_n):
|
|||
|
||||
|
||||
def load_confusions(filename, n_class):
|
||||
with open(filename, "r") as f:
|
||||
|
||||
with open(filename, 'r') as f:
|
||||
lines = f.readlines()
|
||||
|
||||
confs = np.zeros((len(lines), n_class, n_class))
|
||||
|
@ -117,8 +124,9 @@ def load_confusions(filename, n_class):
|
|||
|
||||
|
||||
def load_training_results(path):
|
||||
filename = join(path, "training.txt")
|
||||
with open(filename, "r") as f:
|
||||
|
||||
filename = join(path, 'training.txt')
|
||||
with open(filename, 'r') as f:
|
||||
lines = f.readlines()
|
||||
|
||||
epochs = []
|
||||
|
@ -129,7 +137,7 @@ def load_training_results(path):
|
|||
t = []
|
||||
for line in lines[1:]:
|
||||
line_info = line.split()
|
||||
if len(line) > 0:
|
||||
if (len(line) > 0):
|
||||
epochs += [int(line_info[0])]
|
||||
steps += [int(line_info[1])]
|
||||
L_out += [float(line_info[2])]
|
||||
|
@ -143,7 +151,8 @@ def load_training_results(path):
|
|||
|
||||
|
||||
def load_single_IoU(filename, n_parts):
|
||||
with open(filename, "r") as f:
|
||||
|
||||
with open(filename, 'r') as f:
|
||||
lines = f.readlines()
|
||||
|
||||
# Load all IoUs
|
||||
|
@ -154,42 +163,37 @@ def load_single_IoU(filename, n_parts):
|
|||
|
||||
|
||||
def load_snap_clouds(path, dataset, only_last=False):
|
||||
cloud_folders = np.array(
|
||||
[join(path, f) for f in listdir_str(path) if f.startswith("val_preds")]
|
||||
)
|
||||
cloud_epochs = np.array([int(f.split("_")[-1]) for f in cloud_folders])
|
||||
|
||||
cloud_folders = np.array([join(path, f) for f in listdir_str(path) if f.startswith('val_preds')])
|
||||
cloud_epochs = np.array([int(f.split('_')[-1]) for f in cloud_folders])
|
||||
epoch_order = np.argsort(cloud_epochs)
|
||||
cloud_epochs = cloud_epochs[epoch_order]
|
||||
cloud_folders = cloud_folders[epoch_order]
|
||||
|
||||
Confs = np.zeros(
|
||||
(len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32
|
||||
)
|
||||
Confs = np.zeros((len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32)
|
||||
for c_i, cloud_folder in enumerate(cloud_folders):
|
||||
if only_last and c_i < len(cloud_epochs) - 1:
|
||||
continue
|
||||
|
||||
# Load confusion if previously saved
|
||||
conf_file = join(cloud_folder, "conf.txt")
|
||||
conf_file = join(cloud_folder, 'conf.txt')
|
||||
if isfile(conf_file):
|
||||
Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32)
|
||||
|
||||
else:
|
||||
for f in listdir_str(cloud_folder):
|
||||
if f.endswith(".ply") and not f.endswith("sub.ply"):
|
||||
if f.endswith('.ply') and not f.endswith('sub.ply'):
|
||||
data = read_ply(join(cloud_folder, f))
|
||||
labels = data["class"]
|
||||
preds = data["preds"]
|
||||
Confs[c_i] += fast_confusion(
|
||||
labels, preds, dataset.label_values
|
||||
).astype(np.int32)
|
||||
labels = data['class']
|
||||
preds = data['preds']
|
||||
Confs[c_i] += fast_confusion(labels, preds, dataset.label_values).astype(np.int32)
|
||||
|
||||
np.savetxt(conf_file, Confs[c_i], "%12d")
|
||||
np.savetxt(conf_file, Confs[c_i], '%12d')
|
||||
|
||||
# Erase ply to save disk memory
|
||||
if c_i < len(cloud_folders) - 1:
|
||||
for f in listdir_str(cloud_folder):
|
||||
if f.endswith(".ply"):
|
||||
if f.endswith('.ply'):
|
||||
remove(join(cloud_folder, f))
|
||||
|
||||
# Remove ignored labels from confusions
|
||||
|
@@ -209,6 +213,7 @@ def load_snap_clouds(path, dataset, only_last=False):


 def compare_trainings(list_of_paths, list_of_labels=None):

     # Parameters
     # **********

@@ -226,13 +231,13 @@ def compare_trainings(list_of_paths, list_of_labels=None):
     all_loss = []
     all_lr = []
     all_times = []
     all_RAMs = []

     for path in list_of_paths:

         print(path)

-        if ("val_IoUs.txt" in [f for f in listdir_str(path)]) or (
-            "val_confs.txt" in [f for f in listdir_str(path)]
-        ):
+        if ('val_IoUs.txt' in [f for f in listdir_str(path)]) or ('val_confs.txt' in [f for f in listdir_str(path)]):
             config = Config()
             config.load(path)
         else:
@ -273,58 +278,59 @@ def compare_trainings(list_of_paths, list_of_labels=None):
|
|||
# Plots learning rate
|
||||
# *******************
|
||||
|
||||
|
||||
if plot_lr:
|
||||
# Figure
|
||||
fig = plt.figure("lr")
|
||||
fig = plt.figure('lr')
|
||||
for i, label in enumerate(list_of_labels):
|
||||
plt.plot(all_epochs[i], all_lr[i], linewidth=1, label=label)
|
||||
|
||||
# Set names for axes
|
||||
plt.xlabel("epochs")
|
||||
plt.ylabel("lr")
|
||||
plt.yscale("log")
|
||||
plt.xlabel('epochs')
|
||||
plt.ylabel('lr')
|
||||
plt.yscale('log')
|
||||
|
||||
# Display legends and title
|
||||
plt.legend(loc=1)
|
||||
|
||||
# Customize the graph
|
||||
ax = fig.gca()
|
||||
ax.grid(linestyle="-.", which="both")
|
||||
ax.grid(linestyle='-.', which='both')
|
||||
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
|
||||
# Plots loss
|
||||
# **********
|
||||
|
||||
# Figure
|
||||
fig = plt.figure("loss")
|
||||
fig = plt.figure('loss')
|
||||
for i, label in enumerate(list_of_labels):
|
||||
plt.plot(all_epochs[i], all_loss[i], linewidth=1, label=label)
|
||||
|
||||
# Set names for axes
|
||||
plt.xlabel("epochs")
|
||||
plt.ylabel("loss")
|
||||
plt.yscale("log")
|
||||
plt.xlabel('epochs')
|
||||
plt.ylabel('loss')
|
||||
plt.yscale('log')
|
||||
|
||||
# Display legends and title
|
||||
plt.legend(loc=1)
|
||||
plt.title("Losses compare")
|
||||
plt.title('Losses compare')
|
||||
|
||||
# Customize the graph
|
||||
ax = fig.gca()
|
||||
ax.grid(linestyle="-.", which="both")
|
||||
ax.grid(linestyle='-.', which='both')
|
||||
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
|
||||
# Plot Times
|
||||
# **********
|
||||
|
||||
# Figure
|
||||
fig = plt.figure("time")
|
||||
fig = plt.figure('time')
|
||||
for i, label in enumerate(list_of_labels):
|
||||
plt.plot(all_epochs[i], np.array(all_times[i]) / 3600, linewidth=1, label=label)
|
||||
|
||||
# Set names for axes
|
||||
plt.xlabel("epochs")
|
||||
plt.ylabel("time")
|
||||
plt.xlabel('epochs')
|
||||
plt.ylabel('time')
|
||||
# plt.yscale('log')
|
||||
|
||||
# Display legends and title
|
||||
|
@ -332,7 +338,7 @@ def compare_trainings(list_of_paths, list_of_labels=None):
|
|||
|
||||
# Customize the graph
|
||||
ax = fig.gca()
|
||||
ax.grid(linestyle="-.", which="both")
|
||||
ax.grid(linestyle='-.', which='both')
|
||||
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
|
||||
# Show all
|
||||
|
@ -340,6 +346,7 @@ def compare_trainings(list_of_paths, list_of_labels=None):
|
|||
|
||||
|
||||
def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
|
||||
|
||||
# Parameters
|
||||
# **********
|
||||
|
||||
|
@ -361,20 +368,18 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
|
|||
config = Config()
|
||||
config.load(list_of_paths[0])
|
||||
|
||||
class_list = [
|
||||
dataset.label_to_names[label]
|
||||
for label in dataset.label_values
|
||||
if label not in dataset.ignored_labels
|
||||
]
|
||||
class_list = [dataset.label_to_names[label] for label in dataset.label_values
|
||||
if label not in dataset.ignored_labels]
|
||||
|
||||
s = "{:^10}|".format("mean")
|
||||
s = '{:^10}|'.format('mean')
|
||||
for c in class_list:
|
||||
s += "{:^10}".format(c)
|
||||
s += '{:^10}'.format(c)
|
||||
print(s)
|
||||
print(10 * "-" + "|" + 10 * config.num_classes * "-")
|
||||
print(10*'-' + '|' + 10*config.num_classes*'-')
|
||||
for path in list_of_paths:
|
||||
|
||||
# Get validation IoUs
|
||||
file = join(path, "val_IoUs.txt")
|
||||
file = join(path, 'val_IoUs.txt')
|
||||
val_IoUs = load_single_IoU(file, config.num_classes)
|
||||
|
||||
# Get mean IoU
|
||||
|
@ -385,9 +390,9 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
|
|||
all_mIoUs += [mIoUs]
|
||||
all_class_IoUs += [class_IoUs]
|
||||
|
||||
s = "{:^10.1f}|".format(100 * mIoUs[-1])
|
||||
s = '{:^10.1f}|'.format(100*mIoUs[-1])
|
||||
for IoU in class_IoUs[-1]:
|
||||
s += "{:^10.1f}".format(100 * IoU)
|
||||
s += '{:^10.1f}'.format(100*IoU)
|
||||
print(s)
|
||||
|
||||
# Get optional full validation on clouds
|
||||
|
@ -395,80 +400,73 @@ def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
|
|||
all_snap_epochs += [snap_epochs]
|
||||
all_snap_IoUs += [snap_IoUs]
|
||||
|
||||
print(10 * "-" + "|" + 10 * config.num_classes * "-")
|
||||
print(10*'-' + '|' + 10*config.num_classes*'-')
|
||||
for snap_IoUs in all_snap_IoUs:
|
||||
if len(snap_IoUs) > 0:
|
||||
s = "{:^10.1f}|".format(100 * np.mean(snap_IoUs[-1]))
|
||||
s = '{:^10.1f}|'.format(100*np.mean(snap_IoUs[-1]))
|
||||
for IoU in snap_IoUs[-1]:
|
||||
s += "{:^10.1f}".format(100 * IoU)
|
||||
s += '{:^10.1f}'.format(100*IoU)
|
||||
else:
|
||||
s = "{:^10s}".format("-")
|
||||
s = '{:^10s}'.format('-')
|
||||
for _ in range(config.num_classes):
|
||||
s += "{:^10s}".format("-")
|
||||
s += '{:^10s}'.format('-')
|
||||
print(s)
|
||||
|
||||
# Plots
|
||||
# *****
|
||||
|
||||
# Figure
|
||||
fig = plt.figure("mIoUs")
|
||||
fig = plt.figure('mIoUs')
|
||||
for i, name in enumerate(list_of_names):
|
||||
p = plt.plot(all_pred_epochs[i], all_mIoUs[i], "--", linewidth=1, label=name)
|
||||
plt.plot(
|
||||
all_snap_epochs[i],
|
||||
np.mean(all_snap_IoUs[i], axis=1),
|
||||
linewidth=1,
|
||||
color=p[-1].get_color(),
|
||||
)
|
||||
plt.xlabel("epochs")
|
||||
plt.ylabel("IoU")
|
||||
p = plt.plot(all_pred_epochs[i], all_mIoUs[i], '--', linewidth=1, label=name)
|
||||
plt.plot(all_snap_epochs[i], np.mean(all_snap_IoUs[i], axis=1), linewidth=1, color=p[-1].get_color())
|
||||
plt.xlabel('epochs')
|
||||
plt.ylabel('IoU')
|
||||
|
||||
# Set limits for y axis
|
||||
# plt.ylim(0.55, 0.95)
|
||||
#plt.ylim(0.55, 0.95)
|
||||
|
||||
# Display legends and title
|
||||
plt.legend(loc=4)
|
||||
|
||||
# Customize the graph
|
||||
ax = fig.gca()
|
||||
ax.grid(linestyle="-.", which="both")
|
||||
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
ax.grid(linestyle='-.', which='both')
|
||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
|
||||
displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7]
|
||||
displayed_classes = []
|
||||
for c_i, c_name in enumerate(class_list):
|
||||
if c_i in displayed_classes:
|
||||
|
||||
# Figure
|
||||
fig = plt.figure(c_name + " IoU")
|
||||
fig = plt.figure(c_name + ' IoU')
|
||||
for i, name in enumerate(list_of_names):
|
||||
plt.plot(
|
||||
all_pred_epochs[i],
|
||||
all_class_IoUs[i][:, c_i],
|
||||
linewidth=1,
|
||||
label=name,
|
||||
)
|
||||
plt.xlabel("epochs")
|
||||
plt.ylabel("IoU")
|
||||
plt.plot(all_pred_epochs[i], all_class_IoUs[i][:, c_i], linewidth=1, label=name)
|
||||
plt.xlabel('epochs')
|
||||
plt.ylabel('IoU')
|
||||
|
||||
# Set limits for y axis
|
||||
# plt.ylim(0.8, 1)
|
||||
#plt.ylim(0.8, 1)
|
||||
|
||||
# Display legends and title
|
||||
plt.legend(loc=4)
|
||||
|
||||
# Customize the graph
|
||||
ax = fig.gca()
|
||||
ax.grid(linestyle="-.", which="both")
|
||||
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
ax.grid(linestyle='-.', which='both')
|
||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
|
||||
# Show all
|
||||
plt.show()
|
||||
|
||||
|
||||
def compare_convergences_classif(list_of_paths, list_of_labels=None):
|
||||
|
||||
# Parameters
|
||||
# **********
|
||||
|
||||
steps_per_epoch = 0
|
||||
smooth_n = 12
|
||||
|
||||
if list_of_labels is None:
|
||||
|
@ -479,10 +477,13 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
|
|||
|
||||
all_pred_epochs = []
|
||||
all_val_OA = []
|
||||
all_train_OA = []
|
||||
all_vote_OA = []
|
||||
all_vote_confs = []
|
||||
|
||||
|
||||
for path in list_of_paths:
|
||||
|
||||
# Load parameters
|
||||
config = Config()
|
||||
config.load(list_of_paths[0])
|
||||
|
@ -495,31 +496,21 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
|
|||
first_e = np.min(epochs)
|
||||
|
||||
# Get validation confusions
|
||||
file = join(path, "val_confs.txt")
|
||||
file = join(path, 'val_confs.txt')
|
||||
val_C1 = load_confusions(file, n_class)
|
||||
val_PRE, val_REC, val_F1, val_IoU, val_ACC = smooth_metrics(
|
||||
val_C1, smooth_n=smooth_n
|
||||
)
|
||||
val_PRE, val_REC, val_F1, val_IoU, val_ACC = smooth_metrics(val_C1, smooth_n=smooth_n)
|
||||
|
||||
# Get vote confusions
|
||||
file = join(path, "vote_confs.txt")
|
||||
file = join(path, 'vote_confs.txt')
|
||||
if exists(file):
|
||||
vote_C2 = load_confusions(file, n_class)
|
||||
vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = smooth_metrics(
|
||||
vote_C2, smooth_n=2
|
||||
)
|
||||
vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = smooth_metrics(vote_C2, smooth_n=2)
|
||||
else:
|
||||
vote_C2 = val_C1
|
||||
vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = (
|
||||
val_PRE,
|
||||
val_REC,
|
||||
val_F1,
|
||||
val_IoU,
|
||||
val_ACC,
|
||||
)
|
||||
vote_PRE, vote_REC, vote_F1, vote_IoU, vote_ACC = (val_PRE, val_REC, val_F1, val_IoU, val_ACC)
|
||||
|
||||
# Aggregate results
|
||||
all_pred_epochs += [np.array([i + first_e for i in range(len(val_ACC))])]
|
||||
all_pred_epochs += [np.array([i+first_e for i in range(len(val_ACC))])]
|
||||
all_val_OA += [val_ACC]
|
||||
all_vote_OA += [vote_ACC]
|
||||
all_vote_confs += [vote_C2]
|
||||
|
@@ -530,15 +521,12 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
     # ***********

     for i, label in enumerate(list_of_labels):
-        print("\n" + label + "\n" + "*" * len(label) + "\n")
+        print('\n' + label + '\n' + '*' * len(label) + '\n')
         print(list_of_paths[i])

         best_epoch = np.argmax(all_vote_OA[i])
-        print(
-            "Best Accuracy : {:.1f} % (epoch {:d})".format(
-                100 * all_vote_OA[i][best_epoch], best_epoch
-            )
-        )
+        print('Best Accuracy : {:.1f} % (epoch {:d})'.format(100 * all_vote_OA[i][best_epoch], best_epoch))

         confs = all_vote_confs[i]
@@ -556,31 +544,32 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
         diags = np.diagonal(class_avg_confs, axis1=-2, axis2=-1)
         class_avg_ACC = np.sum(diags, axis=-1) / np.sum(class_avg_confs, axis=(-1, -2))

-        print("Corresponding mAcc : {:.1f} %".format(100 * class_avg_ACC[best_epoch]))
+        print('Corresponding mAcc : {:.1f} %'.format(100 * class_avg_ACC[best_epoch]))

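The two numbers reported here are different reductions of the same vote confusion matrix: the overall accuracy printed just before is the fraction of correctly classified points, while mAcc normalizes each true-class row first and then averages, so rare classes count as much as frequent ones. A small worked example with a made-up 2-class confusion matrix:

import numpy as np

C = np.array([[90, 10],
              [5,   5]], dtype=np.float64)   # class 1 is rare

OA = np.trace(C) / C.sum()                   # (90 + 5) / 110 ~ 0.86
per_class_acc = np.diag(C) / C.sum(axis=1)   # [0.90, 0.50]
mAcc = per_class_acc.mean()                  # 0.70
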
# Plots
|
||||
# *****
|
||||
|
||||
for fig_name, OA in zip(["Validation", "Vote"], [all_val_OA, all_vote_OA]):
|
||||
for fig_name, OA in zip(['Validation', 'Vote'], [all_val_OA, all_vote_OA]):
|
||||
|
||||
# Figure
|
||||
fig = plt.figure(fig_name)
|
||||
for i, label in enumerate(list_of_labels):
|
||||
plt.plot(all_pred_epochs[i], OA[i], linewidth=1, label=label)
|
||||
plt.xlabel("epochs")
|
||||
plt.ylabel(fig_name + " Accuracy")
|
||||
plt.xlabel('epochs')
|
||||
plt.ylabel(fig_name + ' Accuracy')
|
||||
|
||||
# Set limits for y axis
|
||||
# plt.ylim(0.55, 0.95)
|
||||
#plt.ylim(0.55, 0.95)
|
||||
|
||||
# Display legends and title
|
||||
plt.legend(loc=4)
|
||||
|
||||
# Customize the graph
|
||||
ax = fig.gca()
|
||||
ax.grid(linestyle="-.", which="both")
|
||||
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
ax.grid(linestyle='-.', which='both')
|
||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
|
||||
# for i, label in enumerate(list_of_labels):
|
||||
#for i, label in enumerate(list_of_labels):
|
||||
# print(label, np.max(all_train_OA[i]), np.max(all_val_OA[i]))
|
||||
|
||||
# Show all
|
||||
|
@ -588,6 +577,7 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
|
|||
|
||||
|
||||
def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
|
||||
|
||||
# Parameters
|
||||
# **********
|
||||
|
||||
|
@ -609,25 +599,23 @@ def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
|
|||
config = Config()
|
||||
config.load(list_of_paths[0])
|
||||
|
||||
class_list = [
|
||||
dataset.label_to_names[label]
|
||||
for label in dataset.label_values
|
||||
if label not in dataset.ignored_labels
|
||||
]
|
||||
class_list = [dataset.label_to_names[label] for label in dataset.label_values
|
||||
if label not in dataset.ignored_labels]
|
||||
|
||||
s = "{:^6}|".format("mean")
|
||||
s = '{:^6}|'.format('mean')
|
||||
for c in class_list:
|
||||
s += "{:^6}".format(c[:4])
|
||||
s += '{:^6}'.format(c[:4])
|
||||
print(s)
|
||||
print(6 * "-" + "|" + 6 * config.num_classes * "-")
|
||||
print(6*'-' + '|' + 6*config.num_classes*'-')
|
||||
for path in list_of_paths:
|
||||
|
||||
# Get validation IoUs
|
||||
nc_model = dataset.num_classes - len(dataset.ignored_labels)
|
||||
file = join(path, "val_IoUs.txt")
|
||||
file = join(path, 'val_IoUs.txt')
|
||||
val_IoUs = load_single_IoU(file, nc_model)
|
||||
|
||||
# Get Subpart IoUs
|
||||
file = join(path, "subpart_IoUs.txt")
|
||||
file = join(path, 'subpart_IoUs.txt')
|
||||
subpart_IoUs = load_single_IoU(file, nc_model)
|
||||
|
||||
# Get mean IoU
|
||||
|
@ -641,75 +629,69 @@ def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
|
|||
all_subpart_mIoUs += [subpart_mIoUs]
|
||||
all_subpart_class_IoUs += [subpart_class_IoUs]
|
||||
|
||||
s = "{:^6.1f}|".format(100 * subpart_mIoUs[-1])
|
||||
s = '{:^6.1f}|'.format(100*subpart_mIoUs[-1])
|
||||
for IoU in subpart_class_IoUs[-1]:
|
||||
s += "{:^6.1f}".format(100 * IoU)
|
||||
s += '{:^6.1f}'.format(100*IoU)
|
||||
print(s)
|
||||
|
||||
print(6 * "-" + "|" + 6 * config.num_classes * "-")
|
||||
print(6*'-' + '|' + 6*config.num_classes*'-')
|
||||
for snap_IoUs in all_val_class_IoUs:
|
||||
if len(snap_IoUs) > 0:
|
||||
s = "{:^6.1f}|".format(100 * np.mean(snap_IoUs[-1]))
|
||||
s = '{:^6.1f}|'.format(100*np.mean(snap_IoUs[-1]))
|
||||
for IoU in snap_IoUs[-1]:
|
||||
s += "{:^6.1f}".format(100 * IoU)
|
||||
s += '{:^6.1f}'.format(100*IoU)
|
||||
else:
|
||||
s = "{:^6s}".format("-")
|
||||
s = '{:^6s}'.format('-')
|
||||
for _ in range(config.num_classes):
|
||||
s += "{:^6s}".format("-")
|
||||
s += '{:^6s}'.format('-')
|
||||
print(s)
|
||||
|
||||
# Plots
|
||||
# *****
|
||||
|
||||
# Figure
|
||||
fig = plt.figure("mIoUs")
|
||||
fig = plt.figure('mIoUs')
|
||||
for i, name in enumerate(list_of_names):
|
||||
p = plt.plot(
|
||||
all_pred_epochs[i], all_subpart_mIoUs[i], "--", linewidth=1, label=name
|
||||
)
|
||||
plt.plot(
|
||||
all_pred_epochs[i], all_val_mIoUs[i], linewidth=1, color=p[-1].get_color()
|
||||
)
|
||||
plt.xlabel("epochs")
|
||||
plt.ylabel("IoU")
|
||||
p = plt.plot(all_pred_epochs[i], all_subpart_mIoUs[i], '--', linewidth=1, label=name)
|
||||
plt.plot(all_pred_epochs[i], all_val_mIoUs[i], linewidth=1, color=p[-1].get_color())
|
||||
plt.xlabel('epochs')
|
||||
plt.ylabel('IoU')
|
||||
|
||||
# Set limits for y axis
|
||||
# plt.ylim(0.55, 0.95)
|
||||
#plt.ylim(0.55, 0.95)
|
||||
|
||||
# Display legends and title
|
||||
plt.legend(loc=4)
|
||||
|
||||
# Customize the graph
|
||||
ax = fig.gca()
|
||||
ax.grid(linestyle="-.", which="both")
|
||||
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
ax.grid(linestyle='-.', which='both')
|
||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
|
||||
displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7]
|
||||
# displayed_classes = []
|
||||
#displayed_classes = []
|
||||
for c_i, c_name in enumerate(class_list):
|
||||
if c_i in displayed_classes:
|
||||
|
||||
# Figure
|
||||
fig = plt.figure(c_name + " IoU")
|
||||
fig = plt.figure(c_name + ' IoU')
|
||||
for i, name in enumerate(list_of_names):
|
||||
plt.plot(
|
||||
all_pred_epochs[i],
|
||||
all_val_class_IoUs[i][:, c_i],
|
||||
linewidth=1,
|
||||
label=name,
|
||||
)
|
||||
plt.xlabel("epochs")
|
||||
plt.ylabel("IoU")
|
||||
plt.plot(all_pred_epochs[i], all_val_class_IoUs[i][:, c_i], linewidth=1, label=name)
|
||||
plt.xlabel('epochs')
|
||||
plt.ylabel('IoU')
|
||||
|
||||
# Set limits for y axis
|
||||
# plt.ylim(0.8, 1)
|
||||
#plt.ylim(0.8, 1)
|
||||
|
||||
# Display legends and title
|
||||
plt.legend(loc=4)
|
||||
|
||||
# Customize the graph
|
||||
ax = fig.gca()
|
||||
ax.grid(linestyle="-.", which="both")
|
||||
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
ax.grid(linestyle='-.', which='both')
|
||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
||||
|
||||
|
||||
|
||||
# Show all
|
||||
plt.show()
|
||||
|
@@ -731,22 +713,23 @@ def experiment_name_1():
     """

     # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
-    start = "Log_2020-04-22_11-52-58"
-    end = "Log_2023-07-29_12-40-27"
+    start = 'Log_2020-04-22_11-52-58'
+    end = 'Log_2023-07-29_12-40-27'

     # Name of the result path
-    res_path = "results"
+    res_path = 'results'

     # Gather logs and sort by date
-    logs = np.sort(
-        [join(res_path, l) for l in listdir_str(res_path) if start <= l <= end]
-    )
+    logs = np.sort([join(res_path, l) for l in listdir_str(res_path) if start <= l <= end])

     # Give names to the logs (for plot legends)
-    logs_names = ["name_log_1", "name_log_2", "name_log_3", "name_log_4"]
+    logs_names = ['name_log_1',
+                  'name_log_2',
+                  'name_log_3',
+                  'name_log_4']

     # safe check log names
-    logs_names = np.array(logs_names[: len(logs)])
+    logs_names = np.array(logs_names[:len(logs)])

     return logs, logs_names

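The start <= l <= end filter works only because the log folders are named Log_YYYY-MM-DD_HH-MM-SS: zero-padded, most-significant-field-first names make plain string comparison agree with chronological order. A tiny illustration with made-up folder names:

logs = ['Log_2020-05-03_08-10-01',
        'Log_2020-04-22_11-52-58',
        'Log_2023-07-29_12-40-27']

start, end = 'Log_2020-04-22_11-52-58', 'Log_2020-12-31_23-59-59'
kept = [l for l in sorted(logs) if start <= l <= end]
print(kept)   # the two 2020 logs, oldest first; the 2023 log is excluded
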
@ -760,26 +743,27 @@ def experiment_name_2():
|
|||
"""
|
||||
|
||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
||||
start = "Log_2020-04-22_11-52-58"
|
||||
end = "Log_2020-05-22_11-52-58"
|
||||
start = 'Log_2020-04-22_11-52-58'
|
||||
end = 'Log_2020-05-22_11-52-58'
|
||||
|
||||
# Name of the result path
|
||||
res_path = "results"
|
||||
res_path = 'results'
|
||||
|
||||
# Gather logs and sort by date
|
||||
logs = np.sort(
|
||||
[join(res_path, l) for l in listdir_str(res_path) if start <= l <= end]
|
||||
)
|
||||
logs = np.sort([join(res_path, l) for l in listdir_str(res_path) if start <= l <= end])
|
||||
|
||||
# Optionally add a specific log at a specific place in the log list
|
||||
logs = logs.astype("<U50")
|
||||
logs = np.insert(logs, 0, "results/Log_2020-04-04_10-04-42")
|
||||
logs = logs.astype('<U50')
|
||||
logs = np.insert(logs, 0, 'results/Log_2020-04-04_10-04-42')
|
||||
|
||||
# Give names to the logs (for plot legends)
|
||||
logs_names = ["name_log_inserted", "name_log_1", "name_log_2", "name_log_3"]
|
||||
logs_names = ['name_log_inserted',
|
||||
'name_log_1',
|
||||
'name_log_2',
|
||||
'name_log_3']
|
||||
|
||||
# safe check log names
|
||||
logs_names = np.array(logs_names[: len(logs)])
|
||||
logs_names = np.array(logs_names[:len(logs)])
|
||||
|
||||
return logs, logs_names
|
||||
|
||||
|
@ -790,7 +774,8 @@ def experiment_name_2():
|
|||
# \***************/
|
||||
#
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
|
||||
######################################################
|
||||
# Choose a list of log to plot together for comparison
|
||||
######################################################
|
||||
|
@ -808,15 +793,15 @@ if __name__ == "__main__":
|
|||
for log in logs:
|
||||
config = Config()
|
||||
config.load(log)
|
||||
if "ShapeNetPart" in config.dataset:
|
||||
this_dataset = "ShapeNetPart"
|
||||
if 'ShapeNetPart' in config.dataset:
|
||||
this_dataset = 'ShapeNetPart'
|
||||
else:
|
||||
this_dataset = config.dataset
|
||||
if plot_dataset:
|
||||
if plot_dataset == this_dataset:
|
||||
continue
|
||||
else:
|
||||
raise ValueError("All logs must share the same dataset to be compared")
|
||||
raise ValueError('All logs must share the same dataset to be compared')
|
||||
else:
|
||||
plot_dataset = this_dataset
|
||||
|
||||
|
@ -824,15 +809,19 @@ if __name__ == "__main__":
|
|||
compare_trainings(logs, logs_names)
|
||||
|
||||
# Plot the validation
|
||||
if config.dataset_task == "classification":
|
||||
if config.dataset_task == 'classification':
|
||||
compare_convergences_classif(logs, logs_names)
|
||||
elif config.dataset_task == "cloud_segmentation":
|
||||
if config.dataset.startswith("S3DIS"):
|
||||
elif config.dataset_task == 'cloud_segmentation':
|
||||
if config.dataset.startswith('S3DIS'):
|
||||
dataset = S3DISDataset(config, load_data=False)
|
||||
compare_convergences_segment(dataset, logs, logs_names)
|
||||
elif config.dataset_task == "slam_segmentation":
|
||||
if config.dataset.startswith("SemanticKitti"):
|
||||
elif config.dataset_task == 'slam_segmentation':
|
||||
if config.dataset.startswith('SemanticKitti'):
|
||||
dataset = SemanticKittiDataset(config)
|
||||
compare_convergences_SLAM(dataset, logs, logs_names)
|
||||
else:
|
||||
raise ValueError("Unsupported dataset : " + plot_dataset)
|
||||
raise ValueError('Unsupported dataset : ' + plot_dataset)

102  test_models.py

@@ -22,13 +22,16 @@
 #

 # Common libs
 import signal
 import os
 import numpy as np
 import sys
 import torch

 # Dataset
-from datasetss.ModelNet40 import *
-from datasetss.S3DIS import *
-from datasetss.SemanticKitti import *
+from datasets.ModelNet40 import *
+from datasets.S3DIS import *
+from datasets.SemanticKitti import *
 from torch.utils.data import DataLoader

 from utils.config import Config
@ -42,25 +45,20 @@ from models.architectures import KPCNN, KPFCNN
|
|||
# \***************/
|
||||
#
|
||||
|
||||
|
||||
def model_choice(chosen_log):
|
||||
|
||||
###########################
|
||||
# Call the test initializer
|
||||
###########################
|
||||
|
||||
# Automatically retrieve the last trained model
|
||||
if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]:
|
||||
if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']:
|
||||
|
||||
# Dataset name
|
||||
test_dataset = "_".join(chosen_log.split("_")[1:])
|
||||
test_dataset = '_'.join(chosen_log.split('_')[1:])
|
||||
|
||||
# List all training logs
|
||||
logs = np.sort(
|
||||
[
|
||||
os.path.join("results", f)
|
||||
for f in os.listdir("results")
|
||||
if f.startswith("Log")
|
||||
]
|
||||
)
|
||||
logs = np.sort([os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log')])
|
||||
|
||||
# Find the last log of asked dataset
|
||||
for log in logs[::-1]:
|
||||
|
@ -70,12 +68,12 @@ def model_choice(chosen_log):
|
|||
chosen_log = log
|
||||
break
|
||||
|
||||
if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]:
|
||||
if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']:
|
||||
raise ValueError('No log of the dataset "' + test_dataset + '" found')
|
||||
|
||||
# Check if log exists
|
||||
if not os.path.exists(chosen_log):
|
||||
raise ValueError("The given log does not exists: " + chosen_log)
|
||||
raise ValueError('The given log does not exists: ' + chosen_log)
|
||||
|
||||
return chosen_log
|
||||
|
||||
|
@ -86,7 +84,8 @@ def model_choice(chosen_log):
|
|||
# \***************/
|
||||
#
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
|
||||
###############################
|
||||
# Choose the model to visualize
|
||||
###############################
|
||||
|
@ -96,7 +95,7 @@ if __name__ == "__main__":
|
|||
# > 'last_XXX': Automatically retrieve the last trained model on dataset XXX
|
||||
# > '(old_)results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model
|
||||
|
||||
chosen_log = "results/Light_KPFCNN"
|
||||
chosen_log = 'results/Light_KPFCNN'
|
||||
|
||||
# Choose the index of the checkpoint to load OR None if you want to load the current checkpoint
|
||||
chkp_idx = -1
|
||||
|
@ -112,25 +111,25 @@ if __name__ == "__main__":
|
|||
############################
|
||||
|
||||
# Set which gpu is going to be used
|
||||
GPU_ID = "0"
|
||||
GPU_ID = '0'
|
||||
|
||||
# Set GPU visible device
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
|
||||
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
|
||||
|
||||
###############
|
||||
# Previous chkp
|
||||
###############
|
||||
|
||||
# Find all checkpoints in the chosen training folder
|
||||
chkp_path = os.path.join(chosen_log, "checkpoints")
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
|
||||
chkp_path = os.path.join(chosen_log, 'checkpoints')
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']
|
||||
|
||||
# Find which snapshot to restore
|
||||
if chkp_idx is None:
|
||||
chosen_chkp = "current_chkp.tar"
|
||||
chosen_chkp = 'current_chkp.tar'
|
||||
else:
|
||||
chosen_chkp = np.sort(chkps)[chkp_idx]
|
||||
chosen_chkp = os.path.join(chosen_log, "checkpoints", chosen_chkp)
|
||||
chosen_chkp = os.path.join(chosen_log, 'checkpoints', chosen_chkp)
|
||||
|
||||
# Initialize configuration class
|
||||
config = Config()
|
||||
|
@ -142,10 +141,10 @@ if __name__ == "__main__":
|
|||
|
||||
# Change parameters for the test here. For example, you can stop augmenting the input data.
|
||||
|
||||
# config.augment_noise = 0.0001
|
||||
# config.augment_symmetries = False
|
||||
# config.batch_num = 3
|
||||
# config.in_radius = 4
|
||||
#config.augment_noise = 0.0001
|
||||
#config.augment_symmetries = False
|
||||
#config.batch_num = 3
|
||||
#config.in_radius = 4
|
||||
config.validation_size = 200
|
||||
config.input_threads = 10
|
||||
|
||||
|
@ -154,69 +153,66 @@ if __name__ == "__main__":
|
|||
##############
|
||||
|
||||
print()
|
||||
print("Data Preparation")
|
||||
print("****************")
|
||||
print(config.dataset)
|
||||
print('Data Preparation')
|
||||
print('****************')
|
||||
|
||||
if on_val:
|
||||
set = "validation"
|
||||
set = 'validation'
|
||||
else:
|
||||
set = "test"
|
||||
set = 'test'
|
||||
|
||||
# Initiate dataset
|
||||
if config.dataset == "ModelNet40":
|
||||
if config.dataset == 'ModelNet40':
|
||||
test_dataset = ModelNet40Dataset(config, train=False)
|
||||
test_sampler = ModelNet40Sampler(test_dataset)
|
||||
collate_fn = ModelNet40Collate
|
||||
elif config.dataset == "S3DIS":
|
||||
test_dataset = S3DISDataset(config, set="validation", use_potentials=True)
|
||||
elif config.dataset == 'S3DIS':
|
||||
test_dataset = S3DISDataset(config, set='validation', use_potentials=True)
|
||||
test_sampler = S3DISSampler(test_dataset)
|
||||
collate_fn = S3DISCollate
|
||||
elif config.dataset == "SemanticKitti":
|
||||
elif config.dataset == 'SemanticKitti':
|
||||
test_dataset = SemanticKittiDataset(config, set=set, balance_classes=False)
|
||||
test_sampler = SemanticKittiSampler(test_dataset)
|
||||
collate_fn = SemanticKittiCollate
|
||||
else:
|
||||
raise ValueError("Unsupported dataset : " + config.dataset)
|
||||
raise ValueError('Unsupported dataset : ' + config.dataset)
|
||||
|
||||
     # Data loader
-    test_loader = DataLoader(
-        test_dataset,
+    test_loader = DataLoader(test_dataset,
                              batch_size=1,
                              sampler=test_sampler,
                              collate_fn=collate_fn,
                              num_workers=config.input_threads,
-        pin_memory=True,
-    )
+                             pin_memory=True)

     # Calibrate samplers
     test_sampler.calibration(test_loader, verbose=True)

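batch_size=1 is not an oversight: point clouds have different numbers of points, so the effective batch is assembled by the dataset-specific sampler and collate function, which stack several clouds into one long point array plus per-cloud lengths, and the calibration call above tunes how many clouds fit the target batch size. The real ModelNet40Collate / S3DISCollate / SemanticKittiCollate functions also precompute neighborhood and pooling indices; the sketch below, with hypothetical field names, only shows the stacking idea:

import numpy as np
import torch

def stack_collate(batch):
    # batch is a list of dicts, each holding a variable-size (N_i, 3) 'points' array
    points = np.concatenate([b['points'] for b in batch], axis=0)
    lengths = np.array([b['points'].shape[0] for b in batch], dtype=np.int32)
    return {'points': torch.from_numpy(points).float(),
            'lengths': torch.from_numpy(lengths)}
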
print("\nModel Preparation")
|
||||
print("*****************")
|
||||
print('\nModel Preparation')
|
||||
print('*****************')
|
||||
|
||||
# Define network model
|
||||
t1 = time.time()
|
||||
if config.dataset_task == "classification":
|
||||
if config.dataset_task == 'classification':
|
||||
net = KPCNN(config)
|
||||
elif config.dataset_task in ["cloud_segmentation", "slam_segmentation"]:
|
||||
elif config.dataset_task in ['cloud_segmentation', 'slam_segmentation']:
|
||||
net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels)
|
||||
else:
|
||||
raise ValueError("Unsupported dataset_task for testing: " + config.dataset_task)
|
||||
raise ValueError('Unsupported dataset_task for testing: ' + config.dataset_task)
|
||||
|
||||
# Define a visualizer class
|
||||
tester = ModelTester(net, chkp_path=chosen_chkp)
|
||||
print("Done in {:.1f}s\n".format(time.time() - t1))
|
||||
print('Done in {:.1f}s\n'.format(time.time() - t1))
|
||||
|
||||
print("\nStart test")
|
||||
print("**********\n")
|
||||
print('\nStart test')
|
||||
print('**********\n')
|
||||
|
||||
# Training
|
||||
if config.dataset_task == "classification":
|
||||
if config.dataset_task == 'classification':
|
||||
tester.classification_test(net, test_loader, config)
|
||||
elif config.dataset_task == "cloud_segmentation":
|
||||
elif config.dataset_task == 'cloud_segmentation':
|
||||
tester.cloud_segmentation_test(net, test_loader, config)
|
||||
elif config.dataset_task == "slam_segmentation":
|
||||
elif config.dataset_task == 'slam_segmentation':
|
||||
tester.slam_segmentation_test(net, test_loader, config)
|
||||
else:
|
||||
raise ValueError("Unsupported dataset_task for testing: " + config.dataset_task)
|
||||
raise ValueError('Unsupported dataset_task for testing: ' + config.dataset_task)


@@ -26,9 +26,10 @@ import signal
 import os
 import numpy as np
 import sys
 import torch

 # Dataset
-from datasetss.ModelNet40 import *
+from datasets.ModelNet40 import *
 from torch.utils.data import DataLoader

 from utils.config import Config
||||
from utils.config import Config
|
||||
|
@ -42,7 +43,6 @@ from models.architectures import KPCNN
|
|||
# \******************/
|
||||
#
|
||||
|
||||
|
||||
class Modelnet40Config(Config):
|
||||
"""
|
||||
Override the parameters you want to modify for this dataset
|
||||
|
@ -53,13 +53,13 @@ class Modelnet40Config(Config):
|
|||
####################
|
||||
|
||||
# Dataset name
|
||||
dataset = "ModelNet40"
|
||||
dataset = 'ModelNet40'
|
||||
|
||||
# Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset).
|
||||
num_classes = None
|
||||
|
||||
# Type of task performed on this dataset (also overwritten)
|
||||
dataset_task = ""
|
||||
dataset_task = ''
|
||||
|
||||
# Number of CPU threads for the input pipeline
|
||||
input_threads = 10
|
||||
|
@ -69,23 +69,21 @@ class Modelnet40Config(Config):
|
|||
#########################
|
||||
|
||||
# Define layers
|
||||
architecture = [
|
||||
"simple",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"global_average",
|
||||
]
|
||||
architecture = ['simple',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'global_average']
|
||||
|
||||
###################
|
||||
# KPConv parameters
|
||||
|
@ -107,10 +105,10 @@ class Modelnet40Config(Config):
|
|||
KP_extent = 1.2
|
||||
|
||||
# Behavior of convolutions in ('constant', 'linear', 'gaussian')
|
||||
KP_influence = "linear"
|
||||
KP_influence = 'linear'
|
||||
|
||||
# Aggregation function of KPConv in ('closest', 'sum')
|
||||
aggregation_mode = "sum"
|
||||
aggregation_mode = 'sum'
|
||||
|
||||
# Choice of input features
|
||||
in_features_dim = 1
|
||||
|
@ -125,7 +123,7 @@ class Modelnet40Config(Config):
|
|||
# Deformable offset loss
|
||||
# 'point2point' fitting geometry by penalizing distance from deform point to input points
|
||||
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
|
||||
deform_fitting_mode = "point2point"
|
||||
deform_fitting_mode = 'point2point'
|
||||
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
|
||||
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
|
||||
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
|
||||
|
@@ -140,7 +138,7 @@ class Modelnet40Config(Config):
     # Learning rate management
     learning_rate = 1e-2
     momentum = 0.98
-    lr_decays = {i: 0.1 ** (1 / 100) for i in range(1, max_epoch)}
+    lr_decays = {i: 0.1**(1/100) for i in range(1, max_epoch)}
     grad_clip_norm = 100.0

     # Number of batch

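Both spellings define the same schedule: the learning rate is multiplied by 0.1**(1/100) after every epoch, which compounds to one decade of decay every 100 epochs. A quick sanity check:

lr = 1e-2
for epoch in range(100):
    lr *= 0.1 ** (1 / 100)
print(lr)   # ~1e-3 after 100 epochs
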
@ -158,7 +156,7 @@ class Modelnet40Config(Config):
|
|||
# Augmentations
|
||||
augment_scale_anisotropic = True
|
||||
augment_symmetries = [True, True, True]
|
||||
augment_rotation = "none"
|
||||
augment_rotation = 'none'
|
||||
augment_scale_min = 0.8
|
||||
augment_scale_max = 1.2
|
||||
augment_noise = 0.001
|
||||
|
@ -168,7 +166,7 @@ class Modelnet40Config(Config):
|
|||
# > 'none': Each point in the whole batch has the same contribution.
|
||||
# > 'class': Each class has the same contribution (points are weighted according to class balance)
|
||||
# > 'batch': Each cloud in the batch has the same contribution (points are weighted according cloud sizes)
|
||||
segloss_balance = "none"
|
||||
segloss_balance = 'none'
|
||||
|
||||
# Do we nee to save convergence
|
||||
saving = True
|
||||
|
@ -181,40 +179,40 @@ class Modelnet40Config(Config):
|
|||
# \***************/
|
||||
#
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
|
||||
############################
|
||||
# Initialize the environment
|
||||
############################
|
||||
|
||||
# Set which gpu is going to be used
|
||||
GPU_ID = "0"
|
||||
GPU_ID = '0'
|
||||
|
||||
# Set GPU visible device
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
|
||||
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
|
||||
|
||||
###############
|
||||
# Previous chkp
|
||||
###############
|
||||
|
||||
# Choose here if you want to start training from a previous snapshot (None for new training)
|
||||
# previous_training_path = 'Log_2020-03-19_19-53-27'
|
||||
previous_training_path = ""
|
||||
#previous_training_path = 'Log_2020-03-19_19-53-27'
|
||||
previous_training_path = ''
|
||||
|
||||
# Choose index of checkpoint to start from. If None, uses the latest chkp
|
||||
chkp_idx = None
|
||||
if previous_training_path:
|
||||
|
||||
# Find all snapshot in the chosen training folder
|
||||
chkp_path = os.path.join("results", previous_training_path, "checkpoints")
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
|
||||
chkp_path = os.path.join('results', previous_training_path, 'checkpoints')
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']
|
||||
|
||||
# Find which snapshot to restore
|
||||
if chkp_idx is None:
|
||||
chosen_chkp = "current_chkp.tar"
|
||||
chosen_chkp = 'current_chkp.tar'
|
||||
else:
|
||||
chosen_chkp = np.sort(chkps)[chkp_idx]
|
||||
chosen_chkp = os.path.join(
|
||||
"results", previous_training_path, "checkpoints", chosen_chkp
|
||||
)
|
||||
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp)
|
||||
|
||||
else:
|
||||
chosen_chkp = None
|
||||
|
@ -224,13 +222,13 @@ if __name__ == "__main__":
|
|||
##############
|
||||
|
||||
print()
|
||||
print("Data Preparation")
|
||||
print("****************")
|
||||
print('Data Preparation')
|
||||
print('****************')
|
||||
|
||||
# Initialize configuration class
|
||||
config = Modelnet40Config()
|
||||
if previous_training_path:
|
||||
config.load(os.path.join("results", previous_training_path))
|
||||
config.load(os.path.join('results', previous_training_path))
|
||||
config.saving_path = None
|
||||
|
||||
# Get path from argument if given
|
||||
|
@ -246,32 +244,28 @@ if __name__ == "__main__":
|
|||
test_sampler = ModelNet40Sampler(test_dataset, balance_labels=True)
|
||||
|
||||
# Initialize the dataloader
|
||||
training_loader = DataLoader(
|
||||
training_dataset,
|
||||
training_loader = DataLoader(training_dataset,
|
||||
batch_size=1,
|
||||
sampler=training_sampler,
|
||||
collate_fn=ModelNet40Collate,
|
||||
num_workers=config.input_threads,
|
||||
pin_memory=True,
|
||||
)
|
||||
test_loader = DataLoader(
|
||||
test_dataset,
|
||||
pin_memory=True)
|
||||
test_loader = DataLoader(test_dataset,
|
||||
batch_size=1,
|
||||
sampler=test_sampler,
|
||||
collate_fn=ModelNet40Collate,
|
||||
num_workers=config.input_threads,
|
||||
pin_memory=True,
|
||||
)
|
||||
pin_memory=True)
|
||||
|
||||
# Calibrate samplers
|
||||
training_sampler.calibration(training_loader)
|
||||
test_sampler.calibration(test_loader)
|
||||
|
||||
# debug_timing(test_dataset, test_sampler, test_loader)
|
||||
# debug_show_clouds(training_dataset, training_sampler, training_loader)
|
||||
#debug_timing(test_dataset, test_sampler, test_loader)
|
||||
#debug_show_clouds(training_dataset, training_sampler, training_loader)
|
||||
|
||||
print("\nModel Preparation")
|
||||
print("*****************")
|
||||
print('\nModel Preparation')
|
||||
print('*****************')
|
||||
|
||||
# Define network model
|
||||
t1 = time.time()
|
||||
|
@ -279,17 +273,20 @@ if __name__ == "__main__":
|
|||
|
||||
# Define a trainer class
|
||||
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
|
||||
print("Done in {:.1f}s\n".format(time.time() - t1))
|
||||
print('Done in {:.1f}s\n'.format(time.time() - t1))
|
||||
|
||||
print("\nStart training")
|
||||
print("**************")
|
||||
print('\nStart training')
|
||||
print('**************')
|
||||
|
||||
# Training
|
||||
try:
|
||||
trainer.train(net, training_loader, test_loader, config)
|
||||
except:
|
||||
print("Caught an error")
|
||||
print('Caught an error')
|
||||
os.kill(os.getpid(), signal.SIGINT)
|
||||
|
||||
print("Forcing exit now")
|
||||
print('Forcing exit now')
|
||||
os.kill(os.getpid(), signal.SIGINT)

130  train_NPM3D.py

@@ -26,7 +26,7 @@ import signal
 import os

 # Dataset
-from datasetss.NPM3D import *
+from datasets.NPM3D import *
 from torch.utils.data import DataLoader

 from utils.config import Config
|
||||
|
@ -40,7 +40,6 @@ from models.architectures import KPFCNN
|
|||
# \******************/
|
||||
#
|
||||
|
||||
|
||||
class NPM3DConfig(Config):
|
||||
"""
|
||||
Override the parameters you want to modify for this dataset
|
||||
|
@ -51,13 +50,13 @@ class NPM3DConfig(Config):
|
|||
####################
|
||||
|
||||
# Dataset name
|
||||
dataset = "NPM3D"
|
||||
dataset = 'NPM3D'
|
||||
|
||||
# Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset).
|
||||
num_classes = None
|
||||
|
||||
# Type of task performed on this dataset (also overwritten)
|
||||
dataset_task = ""
|
||||
dataset_task = ''
|
||||
|
||||
# Number of CPU threads for the input pipeline
|
||||
input_threads = 10
|
||||
|
@ -67,30 +66,28 @@ class NPM3DConfig(Config):
|
|||
#########################
|
||||
|
||||
# # Define layers
|
||||
architecture = [
|
||||
"simple",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
]
|
||||
architecture = ['simple',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary']
|
||||
|
||||
###################
|
||||
# KPConv parameters
|
||||
|
@ -115,10 +112,10 @@ class NPM3DConfig(Config):
|
|||
KP_extent = 1.2
|
||||
|
||||
# Behavior of convolutions in ('constant', 'linear', 'gaussian')
|
||||
KP_influence = "linear"
|
||||
KP_influence = 'linear'
|
||||
|
||||
# Aggregation function of KPConv in ('closest', 'sum')
|
||||
aggregation_mode = "sum"
|
||||
aggregation_mode = 'sum'
|
||||
|
||||
# Choice of input features
|
||||
first_features_dim = 128
|
||||
|
@ -134,7 +131,7 @@ class NPM3DConfig(Config):
|
|||
# Deformable offset loss
|
||||
# 'point2point' fitting geometry by penalizing distance from deform point to input points
|
||||
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
|
||||
deform_fitting_mode = "point2point"
|
||||
deform_fitting_mode = 'point2point'
|
||||
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
|
||||
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
|
||||
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
|
||||
|
@ -167,7 +164,7 @@ class NPM3DConfig(Config):
|
|||
# Augmentations
|
||||
augment_scale_anisotropic = True
|
||||
augment_symmetries = [True, False, False]
|
||||
augment_rotation = "vertical"
|
||||
augment_rotation = 'vertical'
|
||||
augment_scale_min = 0.9
|
||||
augment_scale_max = 1.1
|
||||
augment_noise = 0.001
|
||||
|
@ -177,7 +174,7 @@ class NPM3DConfig(Config):
|
|||
# > 'none': Each point in the whole batch has the same contribution.
|
||||
# > 'class': Each class has the same contribution (points are weighted according to class balance)
|
||||
# > 'batch': Each cloud in the batch has the same contribution (points are weighted according cloud sizes)
|
||||
segloss_balance = "none"
|
||||
segloss_balance = 'none'
|
||||
|
||||
# Do we nee to save convergence
|
||||
saving = True
|
||||
|
@ -190,16 +187,17 @@ class NPM3DConfig(Config):
|
|||
# \***************/
|
||||
#
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
|
||||
############################
|
||||
# Initialize the environment
|
||||
############################
|
||||
|
||||
# Set which gpu is going to be used
|
||||
GPU_ID = "0"
|
||||
GPU_ID = '0'
|
||||
|
||||
# Set GPU visible device
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
|
||||
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
|
||||
|
||||
###############
|
||||
# Previous chkp
|
||||
|
@ -207,23 +205,22 @@ if __name__ == "__main__":
|
|||
|
||||
# Choose here if you want to start training from a previous snapshot (None for new training)
|
||||
# previous_training_path = 'Log_2020-03-19_19-53-27'
|
||||
previous_training_path = ""
|
||||
previous_training_path = ''
|
||||
|
||||
# Choose index of checkpoint to start from. If None, uses the latest chkp
|
||||
chkp_idx = None
|
||||
if previous_training_path:
|
||||
|
||||
# Find all snapshot in the chosen training folder
|
||||
chkp_path = os.path.join("results", previous_training_path, "checkpoints")
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
|
||||
chkp_path = os.path.join('results', previous_training_path, 'checkpoints')
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']
|
||||
|
||||
# Find which snapshot to restore
|
||||
if chkp_idx is None:
|
||||
chosen_chkp = "current_chkp.tar"
|
||||
chosen_chkp = 'current_chkp.tar'
|
||||
else:
|
||||
chosen_chkp = np.sort(chkps)[chkp_idx]
|
||||
chosen_chkp = os.path.join(
|
||||
"results", previous_training_path, "checkpoints", chosen_chkp
|
||||
)
|
||||
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp)
|
||||
|
||||
else:
|
||||
chosen_chkp = None
|
||||
|
@ -233,13 +230,13 @@ if __name__ == "__main__":
|
|||
##############
|
||||
|
||||
print()
|
||||
print("Data Preparation")
|
||||
print("****************")
|
||||
print('Data Preparation')
|
||||
print('****************')
|
||||
|
||||
# Initialize configuration class
|
||||
config = NPM3DConfig()
|
||||
if previous_training_path:
|
||||
config.load(os.path.join("results", previous_training_path))
|
||||
config.load(os.path.join('results', previous_training_path))
|
||||
config.saving_path = None
|
||||
|
||||
# Get path from argument if given
|
||||
|
@ -247,30 +244,26 @@ if __name__ == "__main__":
|
|||
config.saving_path = sys.argv[1]
|
||||
|
||||
# Initialize datasets
|
||||
training_dataset = NPM3DDataset(config, set="training", use_potentials=True)
|
||||
test_dataset = NPM3DDataset(config, set="validation", use_potentials=True)
|
||||
training_dataset = NPM3DDataset(config, set='training', use_potentials=True)
|
||||
test_dataset = NPM3DDataset(config, set='validation', use_potentials=True)
|
||||
|
||||
# Initialize samplers
|
||||
training_sampler = NPM3DSampler(training_dataset)
|
||||
test_sampler = NPM3DSampler(test_dataset)
|
||||
|
||||
# Initialize the dataloader
|
||||
training_loader = DataLoader(
|
||||
training_dataset,
|
||||
training_loader = DataLoader(training_dataset,
|
||||
batch_size=1,
|
||||
sampler=training_sampler,
|
||||
collate_fn=NPM3DCollate,
|
||||
num_workers=config.input_threads,
|
||||
pin_memory=True,
|
||||
)
|
||||
test_loader = DataLoader(
|
||||
test_dataset,
|
||||
pin_memory=True)
|
||||
test_loader = DataLoader(test_dataset,
|
||||
batch_size=1,
|
||||
sampler=test_sampler,
|
||||
collate_fn=NPM3DCollate,
|
||||
num_workers=config.input_threads,
|
||||
pin_memory=True,
|
||||
)
|
||||
pin_memory=True)
|
||||
|
||||
# Calibrate samplers
|
||||
training_sampler.calibration(training_loader, verbose=True)
|
||||
|
@ -281,8 +274,8 @@ if __name__ == "__main__":
|
|||
# debug_timing(test_dataset, test_loader)
|
||||
# debug_upsampling(training_dataset, training_loader)
|
||||
|
||||
print("\nModel Preparation")
|
||||
print("*****************")
|
||||
print('\nModel Preparation')
|
||||
print('*****************')
|
||||
|
||||
# Define network model
|
||||
t1 = time.time()
|
||||
|
@ -290,28 +283,25 @@ if __name__ == "__main__":
|
|||
|
||||
debug = False
|
||||
if debug:
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
print(net)
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
for param in net.parameters():
|
||||
if param.requires_grad:
|
||||
print(param.shape)
|
||||
print("\n*************************************\n")
|
||||
print(
|
||||
"Model size %i"
|
||||
% sum(param.numel() for param in net.parameters() if param.requires_grad)
|
||||
)
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad))
|
||||
print('\n*************************************\n')
|
||||
|
||||
# Define a trainer class
|
||||
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
|
||||
print("Done in {:.1f}s\n".format(time.time() - t1))
|
||||
print('Done in {:.1f}s\n'.format(time.time() - t1))
|
||||
|
||||
print("\nStart training")
|
||||
print("**************")
|
||||
print('\nStart training')
|
||||
print('**************')
|
||||
|
||||
# Training
|
||||
trainer.train(net, training_loader, test_loader, config)
|
||||
|
||||
print("Forcing exit now")
|
||||
print('Forcing exit now')
|
||||
os.kill(os.getpid(), signal.SIGINT)
|
||||

130  train_S3DIS.py

@@ -26,7 +26,7 @@ import signal
 import os

 # Dataset
-from datasetss.S3DIS import *
+from datasets.S3DIS import *
 from torch.utils.data import DataLoader

 from utils.config import Config
|
||||
|
@ -40,7 +40,6 @@ from models.architectures import KPFCNN
|
|||
# \******************/
|
||||
#
|
||||
|
||||
|
||||
class S3DISConfig(Config):
|
||||
"""
|
||||
Override the parameters you want to modify for this dataset
|
||||
|
@ -51,13 +50,13 @@ class S3DISConfig(Config):
|
|||
####################
|
||||
|
||||
# Dataset name
|
||||
dataset = "S3DIS"
|
||||
dataset = 'S3DIS'
|
||||
|
||||
# Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset).
|
||||
num_classes = None
|
||||
|
||||
# Type of task performed on this dataset (also overwritten)
|
||||
dataset_task = ""
|
||||
dataset_task = ''
|
||||
|
||||
# Number of CPU threads for the input pipeline
|
||||
input_threads = 10
|
||||
|
@ -67,30 +66,28 @@ class S3DISConfig(Config):
|
|||
#########################
|
||||
|
||||
# # Define layers
|
||||
architecture = [
|
||||
"simple",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb_deformable",
|
||||
"resnetb_deformable",
|
||||
"resnetb_deformable_strided",
|
||||
"resnetb_deformable",
|
||||
"resnetb_deformable",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
]
|
||||
architecture = ['simple',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb_deformable',
|
||||
'resnetb_deformable',
|
||||
'resnetb_deformable_strided',
|
||||
'resnetb_deformable',
|
||||
'resnetb_deformable',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary']
|
||||
|
||||
# Define layers
|
||||
# architecture = ['simple',
|
||||
|
@ -139,10 +136,10 @@ class S3DISConfig(Config):
|
|||
KP_extent = 1.2
|
||||
|
||||
# Behavior of convolutions in ('constant', 'linear', 'gaussian')
|
||||
KP_influence = "linear"
|
||||
KP_influence = 'linear'
|
||||
|
||||
# Aggregation function of KPConv in ('closest', 'sum')
|
||||
aggregation_mode = "sum"
|
||||
aggregation_mode = 'sum'
|
||||
|
||||
# Choice of input features
|
||||
first_features_dim = 128
|
||||
|
@ -158,7 +155,7 @@ class S3DISConfig(Config):
|
|||
# Deformable offset loss
|
||||
# 'point2point' fitting geometry by penalizing distance from deform point to input points
|
||||
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
|
||||
deform_fitting_mode = "point2point"
|
||||
deform_fitting_mode = 'point2point'
|
||||
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
|
||||
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
|
||||
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
|
||||
|
@ -191,7 +188,7 @@ class S3DISConfig(Config):
|
|||
# Augmentations
|
||||
augment_scale_anisotropic = True
|
||||
augment_symmetries = [True, False, False]
|
||||
augment_rotation = "vertical"
|
||||
augment_rotation = 'vertical'
|
||||
augment_scale_min = 0.9
|
||||
augment_scale_max = 1.1
|
||||
augment_noise = 0.001
|
||||
|
@ -201,7 +198,7 @@ class S3DISConfig(Config):
|
|||
# > 'none': Each point in the whole batch has the same contribution.
|
||||
# > 'class': Each class has the same contribution (points are weighted according to class balance)
|
||||
# > 'batch': Each cloud in the batch has the same contribution (points are weighted according cloud sizes)
|
||||
segloss_balance = "none"
|
||||
segloss_balance = 'none'
|
||||
|
||||
# Do we nee to save convergence
|
||||
saving = True
|
||||
|
@ -214,16 +211,17 @@ class S3DISConfig(Config):
|
|||
# \***************/
|
||||
#
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
|
||||
############################
|
||||
# Initialize the environment
|
||||
############################
|
||||
|
||||
# Set which gpu is going to be used
|
||||
GPU_ID = "0"
|
||||
GPU_ID = '0'
|
||||
|
||||
# Set GPU visible device
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
|
||||
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
|
||||
|
||||
###############
|
||||
# Previous chkp
|
||||
|
@ -231,23 +229,22 @@ if __name__ == "__main__":
|
|||
|
||||
# Choose here if you want to start training from a previous snapshot (None for new training)
|
||||
# previous_training_path = 'Log_2020-03-19_19-53-27'
|
||||
previous_training_path = ""
|
||||
previous_training_path = ''
|
||||
|
||||
# Choose index of checkpoint to start from. If None, uses the latest chkp
|
||||
chkp_idx = None
|
||||
if previous_training_path:
|
||||
|
||||
# Find all snapshot in the chosen training folder
|
||||
chkp_path = os.path.join("results", previous_training_path, "checkpoints")
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
|
||||
chkp_path = os.path.join('results', previous_training_path, 'checkpoints')
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']
|
||||
|
||||
# Find which snapshot to restore
|
||||
if chkp_idx is None:
|
||||
chosen_chkp = "current_chkp.tar"
|
||||
chosen_chkp = 'current_chkp.tar'
|
||||
else:
|
||||
chosen_chkp = np.sort(chkps)[chkp_idx]
|
||||
chosen_chkp = os.path.join(
|
||||
"results", previous_training_path, "checkpoints", chosen_chkp
|
||||
)
|
||||
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp)
|
||||
|
||||
else:
|
||||
chosen_chkp = None
|
||||
|
@ -257,13 +254,13 @@ if __name__ == "__main__":
|
|||
##############
|
||||
|
||||
print()
|
||||
print("Data Preparation")
|
||||
print("****************")
|
||||
print('Data Preparation')
|
||||
print('****************')
|
||||
|
||||
# Initialize configuration class
|
||||
config = S3DISConfig()
|
||||
if previous_training_path:
|
||||
config.load(os.path.join("results", previous_training_path))
|
||||
config.load(os.path.join('results', previous_training_path))
|
||||
config.saving_path = None
|
||||
|
||||
# Get path from argument if given
|
||||
|
@ -271,30 +268,26 @@ if __name__ == "__main__":
|
|||
config.saving_path = sys.argv[1]
|
||||
|
||||
# Initialize datasets
|
||||
training_dataset = S3DISDataset(config, set="training", use_potentials=True)
|
||||
test_dataset = S3DISDataset(config, set="validation", use_potentials=True)
|
||||
training_dataset = S3DISDataset(config, set='training', use_potentials=True)
|
||||
test_dataset = S3DISDataset(config, set='validation', use_potentials=True)
|
||||
|
||||
# Initialize samplers
|
||||
training_sampler = S3DISSampler(training_dataset)
|
||||
test_sampler = S3DISSampler(test_dataset)
|
||||
|
||||
# Initialize the dataloader
|
||||
training_loader = DataLoader(
|
||||
training_dataset,
|
||||
training_loader = DataLoader(training_dataset,
|
||||
batch_size=1,
|
||||
sampler=training_sampler,
|
||||
collate_fn=S3DISCollate,
|
||||
num_workers=config.input_threads,
|
||||
pin_memory=True,
|
||||
)
|
||||
test_loader = DataLoader(
|
||||
test_dataset,
|
||||
pin_memory=True)
|
||||
test_loader = DataLoader(test_dataset,
|
||||
batch_size=1,
|
||||
sampler=test_sampler,
|
||||
collate_fn=S3DISCollate,
|
||||
num_workers=config.input_threads,
|
||||
pin_memory=True,
|
||||
)
|
||||
pin_memory=True)
|
||||
|
||||
# Calibrate samplers
|
||||
training_sampler.calibration(training_loader, verbose=True)
|
||||
|
@ -305,8 +298,8 @@ if __name__ == "__main__":
|
|||
# debug_timing(test_dataset, test_loader)
|
||||
# debug_upsampling(training_dataset, training_loader)
|
||||
|
||||
print("\nModel Preparation")
|
||||
print("*****************")
|
||||
print('\nModel Preparation')
|
||||
print('*****************')
|
||||
|
||||
# Define network model
|
||||
t1 = time.time()
|
||||
|
@ -314,28 +307,25 @@ if __name__ == "__main__":
|
|||
|
||||
debug = False
|
||||
if debug:
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
print(net)
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
for param in net.parameters():
|
||||
if param.requires_grad:
|
||||
print(param.shape)
|
||||
print("\n*************************************\n")
|
||||
print(
|
||||
"Model size %i"
|
||||
% sum(param.numel() for param in net.parameters() if param.requires_grad)
|
||||
)
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad))
|
||||
print('\n*************************************\n')
|
||||
|
||||
# Define a trainer class
|
||||
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
|
||||
print("Done in {:.1f}s\n".format(time.time() - t1))
|
||||
print('Done in {:.1f}s\n'.format(time.time() - t1))
|
||||
|
||||
print("\nStart training")
|
||||
print("**************")
|
||||
print('\nStart training')
|
||||
print('**************')
|
||||
|
||||
# Training
|
||||
trainer.train(net, training_loader, test_loader, config)
|
||||
|
||||
print("Forcing exit now")
|
||||
print('Forcing exit now')
|
||||
os.kill(os.getpid(), signal.SIGINT)
|
||||
train_SemanticKitti.py
@ -26,9 +26,10 @@ import signal
|
|||
import os
|
||||
import numpy as np
|
||||
import sys
|
||||
import torch
|
||||
|
||||
# Dataset
|
||||
from datasetss.SemanticKitti import *
|
||||
from datasets.SemanticKitti import *
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
from utils.config import Config
|
||||
|
@ -42,7 +43,6 @@ from models.architectures import KPFCNN
|
|||
# \******************/
|
||||
#
|
||||
|
||||
|
||||
class SemanticKittiConfig(Config):
|
||||
"""
|
||||
Override the parameters you want to modify for this dataset
|
||||
|
@ -53,13 +53,13 @@ class SemanticKittiConfig(Config):
|
|||
####################
|
||||
|
||||
# Dataset name
|
||||
dataset = "SemanticKitti"
|
||||
dataset = 'SemanticKitti'
|
||||
|
||||
# Number of classes in the dataset (This value is overwritten by the dataset class when initializing the dataset).
|
||||
num_classes = None
|
||||
|
||||
# Type of task performed on this dataset (also overwritten)
|
||||
dataset_task = ""
|
||||
dataset_task = ''
|
||||
|
||||
# Number of CPU threads for the input pipeline
|
||||
input_threads = 10
|
||||
|
@ -69,29 +69,27 @@ class SemanticKittiConfig(Config):
|
|||
#########################
|
||||
|
||||
# Define layers
|
||||
architecture = [
|
||||
"simple",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"resnetb",
|
||||
"resnetb_strided",
|
||||
"resnetb",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
"nearest_upsample",
|
||||
"unary",
|
||||
]
|
||||
architecture = ['simple',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'resnetb',
|
||||
'resnetb_strided',
|
||||
'resnetb',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary',
|
||||
'nearest_upsample',
|
||||
'unary']
|
||||
|
||||
###################
|
||||
# KPConv parameters
|
||||
|
@ -124,10 +122,10 @@ class SemanticKittiConfig(Config):
|
|||
KP_extent = 1.2
|
||||
|
||||
# Behavior of convolutions in ('constant', 'linear', 'gaussian')
|
||||
KP_influence = "linear"
|
||||
KP_influence = 'linear'
|
||||
|
||||
# Aggregation function of KPConv in ('closest', 'sum')
|
||||
aggregation_mode = "sum"
|
||||
aggregation_mode = 'sum'
|
||||
|
||||
# Choice of input features
|
||||
first_features_dim = 128
|
||||
|
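As a hedged sketch of what the three KP_influence modes amount to (the helper name and the 0.3 sigma factor are illustrative assumptions, not the repository's kernel code), each neighbor-to-kernel-point distance d is mapped to a weight that vanishes beyond KP_extent:

import numpy as np

def kp_influence_weights(d, mode='linear', extent=1.2):
    # d: array of distances between neighbor points and one kernel point
    if mode == 'constant':
        return (d < extent).astype(np.float32)
    elif mode == 'linear':
        return np.clip(1.0 - d / extent, 0.0, None)
    elif mode == 'gaussian':
        sigma = 0.3 * extent   # illustrative choice of width
        return np.exp(-d ** 2 / (2 * sigma ** 2)) * (d < extent)
    raise ValueError('Unknown KP_influence mode: ' + mode)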
@ -143,7 +141,7 @@ class SemanticKittiConfig(Config):
|
|||
# Deformable offset loss
|
||||
# 'point2point' fitting geometry by penalizing distance from deform point to input points
|
||||
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
|
||||
deform_fitting_mode = "point2point"
|
||||
deform_fitting_mode = 'point2point'
|
||||
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
|
||||
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
|
||||
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
|
||||
|
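For orientation, a rough and hypothetical sketch of a 'point2point' fitting term combined with the repulsive term these parameters control (names and shapes are assumptions; this is not the repository's exact regularizer):

import torch

def deform_fitting_loss(deformed_kp, nearest_dists, fitting_power=1.0, repulse_extent=1.2):
    # deformed_kp: (K, 3) deformed kernel point positions
    # nearest_dists: (K,) distance from each deformed kernel point to its closest input point
    fitting = torch.mean(nearest_dists ** 2)
    # repulsive term: push kernel points apart when they come closer than repulse_extent
    pairwise = torch.cdist(deformed_kp, deformed_kp)
    off_diag = ~torch.eye(deformed_kp.shape[0], dtype=torch.bool, device=deformed_kp.device)
    repulsive = torch.clamp(repulse_extent - pairwise[off_diag], min=0.0) ** 2
    return fitting_power * (fitting + torch.mean(repulsive))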
@ -173,7 +171,7 @@ class SemanticKittiConfig(Config):
|
|||
# Augmentations
|
||||
augment_scale_anisotropic = True
|
||||
augment_symmetries = [True, False, False]
|
||||
augment_rotation = "vertical"
|
||||
augment_rotation = 'vertical'
|
||||
augment_scale_min = 0.8
|
||||
augment_scale_max = 1.2
|
||||
augment_noise = 0.001
|
||||
|
@ -204,16 +202,17 @@ class SemanticKittiConfig(Config):
|
|||
# \***************/
|
||||
#
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
|
||||
############################
|
||||
# Initialize the environment
|
||||
############################
|
||||
|
||||
# Set which gpu is going to be used
|
||||
GPU_ID = "0"
|
||||
GPU_ID = '0'
|
||||
|
||||
# Set GPU visible device
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
|
||||
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
|
||||
|
||||
###############
|
||||
# Previous chkp
|
||||
|
@ -221,23 +220,22 @@ if __name__ == "__main__":
|
|||
|
||||
# Choose here if you want to start training from a previous snapshot (None for new training)
|
||||
# previous_training_path = 'Log_2020-03-19_19-53-27'
|
||||
previous_training_path = ""
|
||||
previous_training_path = ''
|
||||
|
||||
# Choose index of checkpoint to start from. If None, uses the latest chkp
|
||||
chkp_idx = None
|
||||
if previous_training_path:
|
||||
|
||||
# Find all snapshot in the chosen training folder
|
||||
chkp_path = os.path.join("results", previous_training_path, "checkpoints")
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
|
||||
chkp_path = os.path.join('results', previous_training_path, 'checkpoints')
|
||||
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']
|
||||
|
||||
# Find which snapshot to restore
|
||||
if chkp_idx is None:
|
||||
chosen_chkp = "current_chkp.tar"
|
||||
chosen_chkp = 'current_chkp.tar'
|
||||
else:
|
||||
chosen_chkp = np.sort(chkps)[chkp_idx]
|
||||
chosen_chkp = os.path.join(
|
||||
"results", previous_training_path, "checkpoints", chosen_chkp
|
||||
)
|
||||
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp)
|
||||
|
||||
else:
|
||||
chosen_chkp = None
|
||||
|
@ -247,13 +245,13 @@ if __name__ == "__main__":
|
|||
##############
|
||||
|
||||
print()
|
||||
print("Data Preparation")
|
||||
print("****************")
|
||||
print('Data Preparation')
|
||||
print('****************')
|
||||
|
||||
# Initialize configuration class
|
||||
config = SemanticKittiConfig()
|
||||
if previous_training_path:
|
||||
config.load(os.path.join("results", previous_training_path))
|
||||
config.load(os.path.join('results', previous_training_path))
|
||||
config.saving_path = None
|
||||
|
||||
# Get path from argument if given
|
||||
|
@ -261,32 +259,28 @@ if __name__ == "__main__":
|
|||
config.saving_path = sys.argv[1]
|
||||
|
||||
# Initialize datasets
|
||||
training_dataset = SemanticKittiDataset(
|
||||
config, set="training", balance_classes=True
|
||||
)
|
||||
test_dataset = SemanticKittiDataset(config, set="validation", balance_classes=False)
|
||||
training_dataset = SemanticKittiDataset(config, set='training',
|
||||
balance_classes=True)
|
||||
test_dataset = SemanticKittiDataset(config, set='validation',
|
||||
balance_classes=False)
|
||||
|
||||
# Initialize samplers
|
||||
training_sampler = SemanticKittiSampler(training_dataset)
|
||||
test_sampler = SemanticKittiSampler(test_dataset)
|
||||
|
||||
# Initialize the dataloader
|
||||
training_loader = DataLoader(
|
||||
training_dataset,
|
||||
training_loader = DataLoader(training_dataset,
|
||||
batch_size=1,
|
||||
sampler=training_sampler,
|
||||
collate_fn=SemanticKittiCollate,
|
||||
num_workers=config.input_threads,
|
||||
pin_memory=True,
|
||||
)
|
||||
test_loader = DataLoader(
|
||||
test_dataset,
|
||||
pin_memory=True)
|
||||
test_loader = DataLoader(test_dataset,
|
||||
batch_size=1,
|
||||
sampler=test_sampler,
|
||||
collate_fn=SemanticKittiCollate,
|
||||
num_workers=config.input_threads,
|
||||
pin_memory=True,
|
||||
)
|
||||
pin_memory=True)
|
||||
|
||||
# Calibrate max_in_point value
|
||||
training_sampler.calib_max_in(config, training_loader, verbose=True)
|
||||
|
@ -300,8 +294,8 @@ if __name__ == "__main__":
|
|||
# debug_timing(test_dataset, test_loader)
|
||||
# debug_class_w(training_dataset, training_loader)
|
||||
|
||||
print("\nModel Preparation")
|
||||
print("*****************")
|
||||
print('\nModel Preparation')
|
||||
print('*****************')
|
||||
|
||||
# Define network model
|
||||
t1 = time.time()
|
||||
|
@ -309,28 +303,25 @@ if __name__ == "__main__":
|
|||
|
||||
debug = False
|
||||
if debug:
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
print(net)
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
for param in net.parameters():
|
||||
if param.requires_grad:
|
||||
print(param.shape)
|
||||
print("\n*************************************\n")
|
||||
print(
|
||||
"Model size %i"
|
||||
% sum(param.numel() for param in net.parameters() if param.requires_grad)
|
||||
)
|
||||
print("\n*************************************\n")
|
||||
print('\n*************************************\n')
|
||||
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad))
|
||||
print('\n*************************************\n')
|
||||
|
||||
# Define a trainer class
|
||||
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
|
||||
print("Done in {:.1f}s\n".format(time.time() - t1))
|
||||
print('Done in {:.1f}s\n'.format(time.time() - t1))
|
||||
|
||||
print("\nStart training")
|
||||
print("**************")
|
||||
print('\nStart training')
|
||||
print('**************')
|
||||
|
||||
# Training
|
||||
trainer.train(net, training_loader, test_loader, config)
|
||||
|
||||
print("Forcing exit now")
|
||||
print('Forcing exit now')
|
||||
os.kill(os.getpid(), signal.SIGINT)
|
||||
|
|
280
utils/config.py
|
@ -21,14 +21,14 @@ import numpy as np
|
|||
|
||||
# Colors for printing
|
||||
class bcolors:
|
||||
HEADER = "\033[95m"
|
||||
OKBLUE = "\033[94m"
|
||||
OKGREEN = "\033[92m"
|
||||
WARNING = "\033[93m"
|
||||
FAIL = "\033[91m"
|
||||
ENDC = "\033[0m"
|
||||
BOLD = "\033[1m"
|
||||
UNDERLINE = "\033[4m"
|
||||
HEADER = '\033[95m'
|
||||
OKBLUE = '\033[94m'
|
||||
OKGREEN = '\033[92m'
|
||||
WARNING = '\033[93m'
|
||||
FAIL = '\033[91m'
|
||||
ENDC = '\033[0m'
|
||||
BOLD = '\033[1m'
|
||||
UNDERLINE = '\033[4m'
|
||||
|
||||
|
||||
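These are plain ANSI escape sequences; a message is colorized by wrapping it between a color code and ENDC, for example:

print(bcolors.WARNING + 'validation size is low' + bcolors.ENDC)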
class Config:
|
||||
|
@ -41,10 +41,10 @@ class Config:
|
|||
##################
|
||||
|
||||
# Dataset name
|
||||
dataset = ""
|
||||
dataset = ''
|
||||
|
||||
# Type of network model
|
||||
dataset_task = ""
|
||||
dataset_task = ''
|
||||
|
||||
# Number of classes in the dataset
|
||||
num_classes = 0
|
||||
|
@ -69,8 +69,8 @@ class Config:
|
|||
architecture = []
|
||||
|
||||
# Decide the mode of equivariance and invariance
|
||||
equivar_mode = ""
|
||||
invar_mode = ""
|
||||
equivar_mode = ''
|
||||
invar_mode = ''
|
||||
|
||||
# Dimension of the first feature maps
|
||||
first_features_dim = 64
|
||||
|
@ -102,14 +102,14 @@ class Config:
|
|||
KP_extent = 1.0
|
||||
|
||||
# Influence function when d < KP_extent. ('constant', 'linear', 'gaussian') When d > KP_extent, always zero
|
||||
KP_influence = "linear"
|
||||
KP_influence = 'linear'
|
||||
|
||||
# Aggregation function of KPConv in ('closest', 'sum')
|
||||
# Decide if you sum all kernel point influences, or if you only take the influence of the closest KP
|
||||
aggregation_mode = "sum"
|
||||
aggregation_mode = 'sum'
|
||||
|
||||
# Fixed points in the kernel : 'none', 'center' or 'verticals'
|
||||
fixed_kernel_points = "center"
|
||||
fixed_kernel_points = 'center'
|
||||
|
||||
# Use modulation in deformable convolutions
|
||||
modulated = False
|
||||
|
@ -141,12 +141,12 @@ class Config:
|
|||
augment_scale_min = 0.9
|
||||
augment_scale_max = 1.1
|
||||
augment_symmetries = [False, False, False]
|
||||
augment_rotation = "vertical"
|
||||
augment_rotation = 'vertical'
|
||||
augment_noise = 0.005
|
||||
augment_color = 0.7
|
||||
|
||||
# Augment with occlusions (not implemented yet)
|
||||
augment_occlusion = "none"
|
||||
augment_occlusion = 'none'
|
||||
augment_occlusion_ratio = 0.2
|
||||
augment_occlusion_num = 1
|
||||
|
||||
|
@ -154,7 +154,7 @@ class Config:
|
|||
weight_decay = 1e-3
|
||||
|
||||
# The way we balance segmentation loss DEPRECATED
|
||||
segloss_balance = "none"
|
||||
segloss_balance = 'none'
|
||||
|
||||
# Choose weights for class (used in segmentation loss). Empty list for no weights
|
||||
class_w = []
|
||||
|
@ -162,7 +162,7 @@ class Config:
|
|||
# Deformable offset loss
|
||||
# 'point2point' fitting geometry by penalizing distance from deform point to input points
|
||||
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
|
||||
deform_fitting_mode = "point2point"
|
||||
deform_fitting_mode = 'point2point'
|
||||
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
|
||||
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
|
||||
repulse_extent = 1.0 # Distance of repulsion for deformed kernel points
|
||||
|
@ -193,16 +193,7 @@ class Config:
|
|||
"""
|
||||
|
||||
# Number of layers
|
||||
self.num_layers = (
|
||||
len(
|
||||
[
|
||||
block
|
||||
for block in self.architecture
|
||||
if "pool" in block or "strided" in block
|
||||
]
|
||||
)
|
||||
+ 1
|
||||
)
|
||||
self.num_layers = len([block for block in self.architecture if 'pool' in block or 'strided' in block]) + 1
|
||||
|
||||
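As a quick illustrative check of the formula above (architecture list shortened for the example):

arch = ['simple', 'resnetb', 'resnetb_strided', 'resnetb', 'resnetb_strided', 'nearest_upsample', 'unary']
num_layers = len([b for b in arch if 'pool' in b or 'strided' in b]) + 1   # -> 2 + 1 = 3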
###################
|
||||
# Deform layer list
|
||||
|
@ -215,13 +206,9 @@ class Config:
|
|||
self.deform_layers = []
|
||||
arch = self.architecture
|
||||
for block_i, block in enumerate(arch):
|
||||
|
||||
# Get all blocks of the layer
|
||||
if not (
|
||||
"pool" in block
|
||||
or "strided" in block
|
||||
or "global" in block
|
||||
or "upsample" in block
|
||||
):
|
||||
if not ('pool' in block or 'strided' in block or 'global' in block or 'upsample' in block):
|
||||
layer_blocks += [block]
|
||||
continue
|
||||
|
||||
|
@ -230,51 +217,50 @@ class Config:
|
|||
|
||||
deform_layer = False
|
||||
if layer_blocks:
|
||||
if np.any(["deformable" in blck for blck in layer_blocks]):
|
||||
if np.any(['deformable' in blck for blck in layer_blocks]):
|
||||
deform_layer = True
|
||||
|
||||
if "pool" in block or "strided" in block:
|
||||
if "deformable" in block:
|
||||
if 'pool' in block or 'strided' in block:
|
||||
if 'deformable' in block:
|
||||
deform_layer = True
|
||||
|
||||
self.deform_layers += [deform_layer]
|
||||
layer_blocks = []
|
||||
|
||||
# Stop when meeting a global pooling or upsampling
|
||||
if "global" in block or "upsample" in block:
|
||||
if 'global' in block or 'upsample' in block:
|
||||
break
|
||||
|
||||
def load(self, path):
|
||||
filename = join(path, "parameters.txt")
|
||||
with open(filename, "r") as f:
|
||||
|
||||
filename = join(path, 'parameters.txt')
|
||||
with open(filename, 'r') as f:
|
||||
lines = f.readlines()
|
||||
|
||||
# Class variable dictionary
|
||||
for line in lines:
|
||||
line_info = line.split()
|
||||
if len(line_info) > 2 and line_info[0] != "#":
|
||||
if line_info[2] == "None":
|
||||
if len(line_info) > 2 and line_info[0] != '#':
|
||||
|
||||
if line_info[2] == 'None':
|
||||
setattr(self, line_info[0], None)
|
||||
|
||||
elif line_info[0] == "lr_decay_epochs":
|
||||
self.lr_decays = {
|
||||
int(b.split(":")[0]): float(b.split(":")[1])
|
||||
for b in line_info[2:]
|
||||
}
|
||||
elif line_info[0] == 'lr_decay_epochs':
|
||||
self.lr_decays = {int(b.split(':')[0]): float(b.split(':')[1]) for b in line_info[2:]}
|
||||
|
||||
elif line_info[0] == "architecture":
|
||||
elif line_info[0] == 'architecture':
|
||||
self.architecture = [b for b in line_info[2:]]
|
||||
|
||||
elif line_info[0] == "augment_symmetries":
|
||||
elif line_info[0] == 'augment_symmetries':
|
||||
self.augment_symmetries = [bool(int(b)) for b in line_info[2:]]
|
||||
|
||||
elif line_info[0] == "num_classes":
|
||||
elif line_info[0] == 'num_classes':
|
||||
if len(line_info) > 3:
|
||||
self.num_classes = [int(c) for c in line_info[2:]]
|
||||
else:
|
||||
self.num_classes = int(line_info[2])
|
||||
|
||||
elif line_info[0] == "class_w":
|
||||
elif line_info[0] == 'class_w':
|
||||
self.class_w = [float(w) for w in line_info[2:]]
|
||||
|
||||
elif hasattr(self, line_info[0]):
|
||||
|
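As an illustration of the parsing above, a hypothetical lr_decay_epochs line from parameters.txt (values invented) is turned into a dict like this:

line = 'lr_decay_epochs = 200:0.100000 300:0.100000'
line_info = line.split()   # ['lr_decay_epochs', '=', '200:0.100000', '300:0.100000']
lr_decays = {int(b.split(':')[0]): float(b.split(':')[1]) for b in line_info[2:]}
# -> {200: 0.1, 300: 0.1}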
@ -289,132 +275,108 @@ class Config:
|
|||
self.__init__()
|
||||
|
||||
def save(self):
|
||||
with open(join(self.saving_path, "parameters.txt"), "w") as text_file:
|
||||
text_file.write("# -----------------------------------#\n")
|
||||
text_file.write("# Parameters of the training session #\n")
|
||||
text_file.write("# -----------------------------------#\n\n")
|
||||
|
||||
with open(join(self.saving_path, 'parameters.txt'), "w") as text_file:
|
||||
|
||||
text_file.write('# -----------------------------------#\n')
|
||||
text_file.write('# Parameters of the training session #\n')
|
||||
text_file.write('# -----------------------------------#\n\n')
|
||||
|
||||
# Input parameters
|
||||
text_file.write("# Input parameters\n")
|
||||
text_file.write("# ****************\n\n")
|
||||
text_file.write("dataset = {:s}\n".format(self.dataset))
|
||||
text_file.write("dataset_task = {:s}\n".format(self.dataset_task))
|
||||
text_file.write('# Input parameters\n')
|
||||
text_file.write('# ****************\n\n')
|
||||
text_file.write('dataset = {:s}\n'.format(self.dataset))
|
||||
text_file.write('dataset_task = {:s}\n'.format(self.dataset_task))
|
||||
if type(self.num_classes) is list:
|
||||
text_file.write("num_classes =")
|
||||
text_file.write('num_classes =')
|
||||
for n in self.num_classes:
|
||||
text_file.write(" {:d}".format(n))
|
||||
text_file.write("\n")
|
||||
text_file.write(' {:d}'.format(n))
|
||||
text_file.write('\n')
|
||||
else:
|
||||
text_file.write("num_classes = {:d}\n".format(self.num_classes))
|
||||
text_file.write("in_points_dim = {:d}\n".format(self.in_points_dim))
|
||||
text_file.write("in_features_dim = {:d}\n".format(self.in_features_dim))
|
||||
text_file.write("in_radius = {:.6f}\n".format(self.in_radius))
|
||||
text_file.write("input_threads = {:d}\n\n".format(self.input_threads))
|
||||
text_file.write('num_classes = {:d}\n'.format(self.num_classes))
|
||||
text_file.write('in_points_dim = {:d}\n'.format(self.in_points_dim))
|
||||
text_file.write('in_features_dim = {:d}\n'.format(self.in_features_dim))
|
||||
text_file.write('in_radius = {:.6f}\n'.format(self.in_radius))
|
||||
text_file.write('input_threads = {:d}\n\n'.format(self.input_threads))
|
||||
|
||||
# Model parameters
|
||||
text_file.write("# Model parameters\n")
|
||||
text_file.write("# ****************\n\n")
|
||||
text_file.write('# Model parameters\n')
|
||||
text_file.write('# ****************\n\n')
|
||||
|
||||
text_file.write("architecture =")
|
||||
text_file.write('architecture =')
|
||||
for a in self.architecture:
|
||||
text_file.write(" {:s}".format(a))
|
||||
text_file.write("\n")
|
||||
text_file.write("equivar_mode = {:s}\n".format(self.equivar_mode))
|
||||
text_file.write("invar_mode = {:s}\n".format(self.invar_mode))
|
||||
text_file.write("num_layers = {:d}\n".format(self.num_layers))
|
||||
text_file.write(
|
||||
"first_features_dim = {:d}\n".format(self.first_features_dim)
|
||||
)
|
||||
text_file.write("use_batch_norm = {:d}\n".format(int(self.use_batch_norm)))
|
||||
text_file.write(
|
||||
"batch_norm_momentum = {:.6f}\n\n".format(self.batch_norm_momentum)
|
||||
)
|
||||
text_file.write(
|
||||
"segmentation_ratio = {:.6f}\n\n".format(self.segmentation_ratio)
|
||||
)
|
||||
text_file.write(' {:s}'.format(a))
|
||||
text_file.write('\n')
|
||||
text_file.write('equivar_mode = {:s}\n'.format(self.equivar_mode))
|
||||
text_file.write('invar_mode = {:s}\n'.format(self.invar_mode))
|
||||
text_file.write('num_layers = {:d}\n'.format(self.num_layers))
|
||||
text_file.write('first_features_dim = {:d}\n'.format(self.first_features_dim))
|
||||
text_file.write('use_batch_norm = {:d}\n'.format(int(self.use_batch_norm)))
|
||||
text_file.write('batch_norm_momentum = {:.6f}\n\n'.format(self.batch_norm_momentum))
|
||||
text_file.write('segmentation_ratio = {:.6f}\n\n'.format(self.segmentation_ratio))
|
||||
|
||||
# KPConv parameters
|
||||
text_file.write("# KPConv parameters\n")
|
||||
text_file.write("# *****************\n\n")
|
||||
text_file.write('# KPConv parameters\n')
|
||||
text_file.write('# *****************\n\n')
|
||||
|
||||
text_file.write(
|
||||
"first_subsampling_dl = {:.6f}\n".format(self.first_subsampling_dl)
|
||||
)
|
||||
text_file.write("num_kernel_points = {:d}\n".format(self.num_kernel_points))
|
||||
text_file.write("conv_radius = {:.6f}\n".format(self.conv_radius))
|
||||
text_file.write("deform_radius = {:.6f}\n".format(self.deform_radius))
|
||||
text_file.write(
|
||||
"fixed_kernel_points = {:s}\n".format(self.fixed_kernel_points)
|
||||
)
|
||||
text_file.write("KP_extent = {:.6f}\n".format(self.KP_extent))
|
||||
text_file.write("KP_influence = {:s}\n".format(self.KP_influence))
|
||||
text_file.write("aggregation_mode = {:s}\n".format(self.aggregation_mode))
|
||||
text_file.write("modulated = {:d}\n".format(int(self.modulated)))
|
||||
text_file.write("n_frames = {:d}\n".format(self.n_frames))
|
||||
text_file.write("max_in_points = {:d}\n\n".format(self.max_in_points))
|
||||
text_file.write("max_val_points = {:d}\n\n".format(self.max_val_points))
|
||||
text_file.write("val_radius = {:.6f}\n\n".format(self.val_radius))
|
||||
text_file.write('first_subsampling_dl = {:.6f}\n'.format(self.first_subsampling_dl))
|
||||
text_file.write('num_kernel_points = {:d}\n'.format(self.num_kernel_points))
|
||||
text_file.write('conv_radius = {:.6f}\n'.format(self.conv_radius))
|
||||
text_file.write('deform_radius = {:.6f}\n'.format(self.deform_radius))
|
||||
text_file.write('fixed_kernel_points = {:s}\n'.format(self.fixed_kernel_points))
|
||||
text_file.write('KP_extent = {:.6f}\n'.format(self.KP_extent))
|
||||
text_file.write('KP_influence = {:s}\n'.format(self.KP_influence))
|
||||
text_file.write('aggregation_mode = {:s}\n'.format(self.aggregation_mode))
|
||||
text_file.write('modulated = {:d}\n'.format(int(self.modulated)))
|
||||
text_file.write('n_frames = {:d}\n'.format(self.n_frames))
|
||||
text_file.write('max_in_points = {:d}\n\n'.format(self.max_in_points))
|
||||
text_file.write('max_val_points = {:d}\n\n'.format(self.max_val_points))
|
||||
text_file.write('val_radius = {:.6f}\n\n'.format(self.val_radius))
|
||||
|
||||
# Training parameters
|
||||
text_file.write("# Training parameters\n")
|
||||
text_file.write("# *******************\n\n")
|
||||
text_file.write('# Training parameters\n')
|
||||
text_file.write('# *******************\n\n')
|
||||
|
||||
text_file.write("learning_rate = {:f}\n".format(self.learning_rate))
|
||||
text_file.write("momentum = {:f}\n".format(self.momentum))
|
||||
text_file.write("lr_decay_epochs =")
|
||||
text_file.write('learning_rate = {:f}\n'.format(self.learning_rate))
|
||||
text_file.write('momentum = {:f}\n'.format(self.momentum))
|
||||
text_file.write('lr_decay_epochs =')
|
||||
for e, d in self.lr_decays.items():
|
||||
text_file.write(" {:d}:{:f}".format(e, d))
|
||||
text_file.write("\n")
|
||||
text_file.write("grad_clip_norm = {:f}\n\n".format(self.grad_clip_norm))
|
||||
text_file.write(' {:d}:{:f}'.format(e, d))
|
||||
text_file.write('\n')
|
||||
text_file.write('grad_clip_norm = {:f}\n\n'.format(self.grad_clip_norm))
|
||||
|
||||
text_file.write("augment_symmetries =")
|
||||
|
||||
text_file.write('augment_symmetries =')
|
||||
for a in self.augment_symmetries:
|
||||
text_file.write(" {:d}".format(int(a)))
|
||||
text_file.write("\n")
|
||||
text_file.write("augment_rotation = {:s}\n".format(self.augment_rotation))
|
||||
text_file.write("augment_noise = {:f}\n".format(self.augment_noise))
|
||||
text_file.write("augment_occlusion = {:s}\n".format(self.augment_occlusion))
|
||||
text_file.write(
|
||||
"augment_occlusion_ratio = {:.6f}\n".format(
|
||||
self.augment_occlusion_ratio
|
||||
)
|
||||
)
|
||||
text_file.write(
|
||||
"augment_occlusion_num = {:d}\n".format(self.augment_occlusion_num)
|
||||
)
|
||||
text_file.write(
|
||||
"augment_scale_anisotropic = {:d}\n".format(
|
||||
int(self.augment_scale_anisotropic)
|
||||
)
|
||||
)
|
||||
text_file.write(
|
||||
"augment_scale_min = {:.6f}\n".format(self.augment_scale_min)
|
||||
)
|
||||
text_file.write(
|
||||
"augment_scale_max = {:.6f}\n".format(self.augment_scale_max)
|
||||
)
|
||||
text_file.write("augment_color = {:.6f}\n\n".format(self.augment_color))
|
||||
text_file.write(' {:d}'.format(int(a)))
|
||||
text_file.write('\n')
|
||||
text_file.write('augment_rotation = {:s}\n'.format(self.augment_rotation))
|
||||
text_file.write('augment_noise = {:f}\n'.format(self.augment_noise))
|
||||
text_file.write('augment_occlusion = {:s}\n'.format(self.augment_occlusion))
|
||||
text_file.write('augment_occlusion_ratio = {:.6f}\n'.format(self.augment_occlusion_ratio))
|
||||
text_file.write('augment_occlusion_num = {:d}\n'.format(self.augment_occlusion_num))
|
||||
text_file.write('augment_scale_anisotropic = {:d}\n'.format(int(self.augment_scale_anisotropic)))
|
||||
text_file.write('augment_scale_min = {:.6f}\n'.format(self.augment_scale_min))
|
||||
text_file.write('augment_scale_max = {:.6f}\n'.format(self.augment_scale_max))
|
||||
text_file.write('augment_color = {:.6f}\n\n'.format(self.augment_color))
|
||||
|
||||
text_file.write("weight_decay = {:f}\n".format(self.weight_decay))
|
||||
text_file.write("segloss_balance = {:s}\n".format(self.segloss_balance))
|
||||
text_file.write("class_w =")
|
||||
text_file.write('weight_decay = {:f}\n'.format(self.weight_decay))
|
||||
text_file.write('segloss_balance = {:s}\n'.format(self.segloss_balance))
|
||||
text_file.write('class_w =')
|
||||
for a in self.class_w:
|
||||
text_file.write(" {:.6f}".format(a))
|
||||
text_file.write("\n")
|
||||
text_file.write(
|
||||
"deform_fitting_mode = {:s}\n".format(self.deform_fitting_mode)
|
||||
)
|
||||
text_file.write(
|
||||
"deform_fitting_power = {:.6f}\n".format(self.deform_fitting_power)
|
||||
)
|
||||
text_file.write("deform_lr_factor = {:.6f}\n".format(self.deform_lr_factor))
|
||||
text_file.write("repulse_extent = {:.6f}\n".format(self.repulse_extent))
|
||||
text_file.write("batch_num = {:d}\n".format(self.batch_num))
|
||||
text_file.write("val_batch_num = {:d}\n".format(self.val_batch_num))
|
||||
text_file.write("max_epoch = {:d}\n".format(self.max_epoch))
|
||||
text_file.write(' {:.6f}'.format(a))
|
||||
text_file.write('\n')
|
||||
text_file.write('deform_fitting_mode = {:s}\n'.format(self.deform_fitting_mode))
|
||||
text_file.write('deform_fitting_power = {:.6f}\n'.format(self.deform_fitting_power))
|
||||
text_file.write('deform_lr_factor = {:.6f}\n'.format(self.deform_lr_factor))
|
||||
text_file.write('repulse_extent = {:.6f}\n'.format(self.repulse_extent))
|
||||
text_file.write('batch_num = {:d}\n'.format(self.batch_num))
|
||||
text_file.write('val_batch_num = {:d}\n'.format(self.val_batch_num))
|
||||
text_file.write('max_epoch = {:d}\n'.format(self.max_epoch))
|
||||
if self.epoch_steps is None:
|
||||
text_file.write("epoch_steps = None\n")
|
||||
text_file.write('epoch_steps = None\n')
|
||||
else:
|
||||
text_file.write("epoch_steps = {:d}\n".format(self.epoch_steps))
|
||||
text_file.write("validation_size = {:d}\n".format(self.validation_size))
|
||||
text_file.write("checkpoint_gap = {:d}\n".format(self.checkpoint_gap))
|
||||
text_file.write('epoch_steps = {:d}\n'.format(self.epoch_steps))
|
||||
text_file.write('validation_size = {:d}\n'.format(self.validation_size))
|
||||
text_file.write('checkpoint_gap = {:d}\n'.format(self.checkpoint_gap))
utils/visualizer.py
@ -23,12 +23,20 @@
|
|||
|
||||
|
||||
# Basic libs
|
||||
import torch
|
||||
import numpy as np
|
||||
from sklearn.neighbors import KDTree
|
||||
from os import makedirs, remove, rename, listdir
|
||||
from os.path import exists, join
|
||||
import time
|
||||
|
||||
import sys
|
||||
|
||||
# PLY reader
|
||||
from utils.ply import write_ply, read_ply
|
||||
|
||||
# Configuration class
|
||||
from utils.config import Config
|
||||
|
||||
|
||||
def show_ModelNet_models(all_points):
|
||||
|
@ -39,7 +47,7 @@ def show_ModelNet_models(all_points):
|
|||
###########################
|
||||
|
||||
# Create figure for features
|
||||
fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800))
|
||||
fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800))
|
||||
fig1.scene.parallel_projection = False
|
||||
|
||||
# Indices
|
||||
|
@ -47,6 +55,7 @@ def show_ModelNet_models(all_points):
|
|||
file_i = 0
|
||||
|
||||
def update_scene():
|
||||
|
||||
# clear figure
|
||||
mlab.clf(fig1)
|
||||
|
||||
|
@ -57,19 +66,17 @@ def show_ModelNet_models(all_points):
|
|||
points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
|
||||
|
||||
# Show point clouds colorized with activations
|
||||
mlab.points3d(
|
||||
points[:, 0],
|
||||
activations = mlab.points3d(points[:, 0],
|
||||
points[:, 1],
|
||||
points[:, 2],
|
||||
points[:, 2],
|
||||
scale_factor=3.0,
|
||||
scale_mode="none",
|
||||
figure=fig1,
|
||||
)
|
||||
scale_mode='none',
|
||||
figure=fig1)
|
||||
|
||||
# New title
|
||||
mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
|
||||
text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->"
|
||||
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
|
||||
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
|
||||
mlab.orientation_axes()
|
||||
|
||||
|
@ -78,11 +85,13 @@ def show_ModelNet_models(all_points):
|
|||
def keyboard_callback(vtk_obj, event):
|
||||
global file_i
|
||||
|
||||
if vtk_obj.GetKeyCode() in ["g", "G"]:
|
||||
if vtk_obj.GetKeyCode() in ['g', 'G']:
|
||||
|
||||
file_i = (file_i - 1) % len(all_points)
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["h", "H"]:
|
||||
elif vtk_obj.GetKeyCode() in ['h', 'H']:
|
||||
|
||||
file_i = (file_i + 1) % len(all_points)
|
||||
update_scene()
|
||||
|
||||
|
@ -90,7 +99,7 @@ def show_ModelNet_models(all_points):
|
|||
|
||||
# Draw a first plot
|
||||
update_scene()
|
||||
fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
|
||||
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
|
||||
mlab.show()
|
||||
|
||||
|
||||
|
@ -102,7 +111,7 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
|
|||
###########################
|
||||
|
||||
# Create figure for features
|
||||
fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800))
|
||||
fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800))
|
||||
fig1.scene.parallel_projection = False
|
||||
|
||||
if cloud_labels is None:
|
||||
|
@ -114,6 +123,7 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
|
|||
show_normals = True
|
||||
|
||||
def update_scene():
|
||||
|
||||
# clear figure
|
||||
mlab.clf(fig1)
|
||||
|
||||
|
@ -129,31 +139,27 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
|
|||
points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
|
||||
|
||||
# Show point clouds colorized with activations
|
||||
mlab.points3d(
|
||||
points[:, 0],
|
||||
activations = mlab.points3d(points[:, 0],
|
||||
points[:, 1],
|
||||
points[:, 2],
|
||||
labels,
|
||||
scale_factor=3.0,
|
||||
scale_mode="none",
|
||||
figure=fig1,
|
||||
)
|
||||
scale_mode='none',
|
||||
figure=fig1)
|
||||
if normals is not None and show_normals:
|
||||
mlab.quiver3d(
|
||||
points[:, 0],
|
||||
activations = mlab.quiver3d(points[:, 0],
|
||||
points[:, 1],
|
||||
points[:, 2],
|
||||
normals[:, 0],
|
||||
normals[:, 1],
|
||||
normals[:, 2],
|
||||
scale_factor=10.0,
|
||||
scale_mode="none",
|
||||
figure=fig1,
|
||||
)
|
||||
scale_mode='none',
|
||||
figure=fig1)
|
||||
|
||||
# New title
|
||||
mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
|
||||
text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->"
|
||||
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
|
||||
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
|
||||
mlab.orientation_axes()
|
||||
|
||||
|
@ -162,15 +168,15 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
|
|||
def keyboard_callback(vtk_obj, event):
|
||||
global file_i, show_normals
|
||||
|
||||
if vtk_obj.GetKeyCode() in ["g", "G"]:
|
||||
if vtk_obj.GetKeyCode() in ['g', 'G']:
|
||||
file_i = (file_i - 1) % len(clouds)
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["h", "H"]:
|
||||
elif vtk_obj.GetKeyCode() in ['h', 'H']:
|
||||
file_i = (file_i + 1) % len(clouds)
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["n", "N"]:
|
||||
elif vtk_obj.GetKeyCode() in ['n', 'N']:
|
||||
show_normals = not show_normals
|
||||
update_scene()
|
||||
|
||||
|
@ -178,7 +184,7 @@ def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
|
|||
|
||||
# Draw a first plot
|
||||
update_scene()
|
||||
fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
|
||||
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
|
||||
mlab.show()
|
||||
|
||||
|
||||
|
@ -190,7 +196,7 @@ def show_neighbors(query, supports, neighbors):
|
|||
###########################
|
||||
|
||||
# Create figure for features
|
||||
fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800))
|
||||
fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800))
|
||||
fig1.scene.parallel_projection = False
|
||||
|
||||
# Indices
|
||||
|
@ -198,6 +204,7 @@ def show_neighbors(query, supports, neighbors):
|
|||
file_i = 0
|
||||
|
||||
def update_scene():
|
||||
|
||||
# clear figure
|
||||
mlab.clf(fig1)
|
||||
|
||||
|
@ -205,40 +212,36 @@ def show_neighbors(query, supports, neighbors):
|
|||
p1 = (query * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
|
||||
p2 = (supports * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
|
||||
|
||||
l1 = p1[:, 2] * 0
|
||||
l1 = p1[:, 2]*0
|
||||
l1[file_i] = 1
|
||||
|
||||
l2 = p2[:, 2] * 0 + 2
|
||||
l2 = p2[:, 2]*0 + 2
|
||||
l2[neighbors[file_i]] = 3
|
||||
|
||||
# Show point clouds colorized with activations
|
||||
mlab.points3d(
|
||||
p1[:, 0],
|
||||
activations = mlab.points3d(p1[:, 0],
|
||||
p1[:, 1],
|
||||
p1[:, 2],
|
||||
l1,
|
||||
scale_factor=2.0,
|
||||
scale_mode="none",
|
||||
scale_mode='none',
|
||||
vmin=0.0,
|
||||
vmax=3.0,
|
||||
figure=fig1,
|
||||
)
|
||||
figure=fig1)
|
||||
|
||||
mlab.points3d(
|
||||
p2[:, 0],
|
||||
activations = mlab.points3d(p2[:, 0],
|
||||
p2[:, 1],
|
||||
p2[:, 2],
|
||||
l2,
|
||||
scale_factor=3.0,
|
||||
scale_mode="none",
|
||||
scale_mode='none',
|
||||
vmin=0.0,
|
||||
vmax=3.0,
|
||||
figure=fig1,
|
||||
)
|
||||
figure=fig1)
|
||||
|
||||
# New title
|
||||
mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
|
||||
text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->"
|
||||
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
|
||||
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
|
||||
mlab.orientation_axes()
|
||||
|
||||
|
@ -247,11 +250,13 @@ def show_neighbors(query, supports, neighbors):
|
|||
def keyboard_callback(vtk_obj, event):
|
||||
global file_i
|
||||
|
||||
if vtk_obj.GetKeyCode() in ["g", "G"]:
|
||||
if vtk_obj.GetKeyCode() in ['g', 'G']:
|
||||
|
||||
file_i = (file_i - 1) % len(query)
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["h", "H"]:
|
||||
elif vtk_obj.GetKeyCode() in ['h', 'H']:
|
||||
|
||||
file_i = (file_i + 1) % len(query)
|
||||
update_scene()
|
||||
|
||||
|
@ -259,7 +264,7 @@ def show_neighbors(query, supports, neighbors):
|
|||
|
||||
# Draw a first plot
|
||||
update_scene()
|
||||
fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
|
||||
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
|
||||
mlab.show()
|
||||
|
||||
|
||||
|
@ -271,7 +276,7 @@ def show_input_batch(batch):
|
|||
###########################
|
||||
|
||||
# Create figure for features
|
||||
fig1 = mlab.figure("Input", bgcolor=(1, 1, 1), size=(1000, 800))
|
||||
fig1 = mlab.figure('Input', bgcolor=(1, 1, 1), size=(1000, 800))
|
||||
fig1.scene.parallel_projection = False
|
||||
|
||||
# Unstack batch
|
||||
|
@ -287,20 +292,18 @@ def show_input_batch(batch):
|
|||
show_pools = False
|
||||
|
||||
def update_scene():
|
||||
|
||||
# clear figure
|
||||
mlab.clf(fig1)
|
||||
|
||||
# Rescale points for visu
|
||||
p = (all_points[l_i][b_i] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
|
||||
labels = p[:, 2] * 0
|
||||
labels = p[:, 2]*0
|
||||
|
||||
if show_pools:
|
||||
p2 = (
|
||||
all_points[l_i + 1][b_i][neighb_i : neighb_i + 1] * 1.5
|
||||
+ np.array([1.0, 1.0, 1.0])
|
||||
) * 50.0
|
||||
p2 = (all_points[l_i+1][b_i][neighb_i:neighb_i+1] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
|
||||
p = np.vstack((p, p2))
|
||||
labels = np.hstack((labels, np.ones((1,), dtype=np.int32) * 3))
|
||||
labels = np.hstack((labels, np.ones((1,), dtype=np.int32)*3))
|
||||
pool_inds = all_pools[l_i][b_i][neighb_i]
|
||||
pool_inds = pool_inds[pool_inds >= 0]
|
||||
labels[pool_inds] = 2
|
||||
|
@ -311,17 +314,16 @@ def show_input_batch(batch):
|
|||
labels[neighb_i] = 3
|
||||
|
||||
# Show point clouds colorized with activations
|
||||
mlab.points3d(
|
||||
p[:, 0],
|
||||
mlab.points3d(p[:, 0],
|
||||
p[:, 1],
|
||||
p[:, 2],
|
||||
labels,
|
||||
scale_factor=2.0,
|
||||
scale_mode="none",
|
||||
scale_mode='none',
|
||||
vmin=0.0,
|
||||
vmax=3.0,
|
||||
figure=fig1,
|
||||
)
|
||||
figure=fig1)
|
||||
|
||||
|
||||
"""
|
||||
mlab.points3d(p[-2:, 0],
|
||||
|
@ -348,16 +350,12 @@ def show_input_batch(batch):
|
|||
"""
|
||||
|
||||
# New title
|
||||
title_str = (
|
||||
"<([) b_i={:d} (])> <(,) l_i={:d} (.)> <(N) n_i={:d} (M)>".format(
|
||||
b_i, l_i, neighb_i
|
||||
)
|
||||
)
|
||||
title_str = '<([) b_i={:d} (])> <(,) l_i={:d} (.)> <(N) n_i={:d} (M)>'.format(b_i, l_i, neighb_i)
|
||||
mlab.title(title_str, color=(0, 0, 0), size=0.3, height=0.90)
|
||||
if show_pools:
|
||||
text = "pools (switch with G)"
|
||||
text = 'pools (switch with G)'
|
||||
else:
|
||||
text = "neighbors (switch with G)"
|
||||
text = 'neighbors (switch with G)'
|
||||
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.3)
|
||||
mlab.orientation_axes()
|
||||
|
||||
|
@ -366,17 +364,17 @@ def show_input_batch(batch):
|
|||
def keyboard_callback(vtk_obj, event):
|
||||
global b_i, l_i, neighb_i, show_pools
|
||||
|
||||
if vtk_obj.GetKeyCode() in ["[", "{"]:
|
||||
if vtk_obj.GetKeyCode() in ['[', '{']:
|
||||
b_i = (b_i - 1) % len(all_points[l_i])
|
||||
neighb_i = 0
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["]", "}"]:
|
||||
elif vtk_obj.GetKeyCode() in [']', '}']:
|
||||
b_i = (b_i + 1) % len(all_points[l_i])
|
||||
neighb_i = 0
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in [",", "<"]:
|
||||
elif vtk_obj.GetKeyCode() in [',', '<']:
|
||||
if show_pools:
|
||||
l_i = (l_i - 1) % (len(all_points) - 1)
|
||||
else:
|
||||
|
@ -384,7 +382,7 @@ def show_input_batch(batch):
|
|||
neighb_i = 0
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in [".", ">"]:
|
||||
elif vtk_obj.GetKeyCode() in ['.', '>']:
|
||||
if show_pools:
|
||||
l_i = (l_i + 1) % (len(all_points) - 1)
|
||||
else:
|
||||
|
@ -392,15 +390,15 @@ def show_input_batch(batch):
|
|||
neighb_i = 0
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["n", "N"]:
|
||||
elif vtk_obj.GetKeyCode() in ['n', 'N']:
|
||||
neighb_i = (neighb_i - 1) % all_points[l_i][b_i].shape[0]
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["m", "M"]:
|
||||
elif vtk_obj.GetKeyCode() in ['m', 'M']:
|
||||
neighb_i = (neighb_i + 1) % all_points[l_i][b_i].shape[0]
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["g", "G"]:
|
||||
elif vtk_obj.GetKeyCode() in ['g', 'G']:
|
||||
if l_i < len(all_points) - 1:
|
||||
show_pools = not show_pools
|
||||
neighb_i = 0
|
||||
|
@ -410,5 +408,29 @@ def show_input_batch(batch):
|
|||
|
||||
# Draw a first plot
|
||||
update_scene()
|
||||
fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
|
||||
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
|
||||
mlab.show()
|
||||
|
utils/metrics.py
@ -32,7 +32,6 @@ import numpy as np
|
|||
# \***************/
|
||||
#
|
||||
|
||||
|
||||
def fast_confusion(true, pred, label_values=None):
|
||||
"""
|
||||
Fast confusion matrix (100x faster than Scikit learn). But only works if labels are la
|
||||
|
@ -46,25 +45,13 @@ def fast_confusion(true, pred, label_values=None):
|
|||
true = np.squeeze(true)
|
||||
pred = np.squeeze(pred)
|
||||
if len(true.shape) != 1:
|
||||
raise ValueError(
|
||||
"Truth values are stored in a {:d}D array instead of 1D array".format(
|
||||
len(true.shape)
|
||||
)
|
||||
)
|
||||
raise ValueError('Truth values are stored in a {:d}D array instead of 1D array'. format(len(true.shape)))
|
||||
if len(pred.shape) != 1:
|
||||
raise ValueError(
|
||||
"Prediction values are stored in a {:d}D array instead of 1D array".format(
|
||||
len(pred.shape)
|
||||
)
|
||||
)
|
||||
raise ValueError('Prediction values are stored in a {:d}D array instead of 1D array'. format(len(pred.shape)))
|
||||
if true.dtype not in [np.int32, np.int64]:
|
||||
raise ValueError(
|
||||
"Truth values are {:s} instead of int32 or int64".format(true.dtype)
|
||||
)
|
||||
raise ValueError('Truth values are {:s} instead of int32 or int64'.format(true.dtype))
|
||||
if pred.dtype not in [np.int32, np.int64]:
|
||||
raise ValueError(
|
||||
"Prediction values are {:s} instead of int32 or int64".format(pred.dtype)
|
||||
)
|
||||
raise ValueError('Prediction values are {:s} instead of int32 or int64'.format(pred.dtype))
|
||||
true = true.astype(np.int32)
|
||||
pred = pred.astype(np.int32)
|
||||
|
||||
|
@ -75,13 +62,9 @@ def fast_confusion(true, pred, label_values=None):
|
|||
else:
|
||||
# Ensure they are good if given
|
||||
if label_values.dtype not in [np.int32, np.int64]:
|
||||
raise ValueError(
|
||||
"label values are {:s} instead of int32 or int64".format(
|
||||
label_values.dtype
|
||||
)
|
||||
)
|
||||
raise ValueError('label values are {:s} instead of int32 or int64'.format(label_values.dtype))
|
||||
if len(np.unique(label_values)) < len(label_values):
|
||||
raise ValueError("Given labels are not unique")
|
||||
raise ValueError('Given labels are not unique')
|
||||
|
||||
# Sort labels
|
||||
label_values = np.sort(label_values)
|
||||
|
@ -89,32 +72,33 @@ def fast_confusion(true, pred, label_values=None):
|
|||
# Get the number of classes
|
||||
num_classes = len(label_values)
|
||||
|
||||
# print(num_classes)
|
||||
# print(label_values)
|
||||
# print(np.max(true))
|
||||
# print(np.max(pred))
|
||||
# print(np.max(true * num_classes + pred))
|
||||
#print(num_classes)
|
||||
#print(label_values)
|
||||
#print(np.max(true))
|
||||
#print(np.max(pred))
|
||||
#print(np.max(true * num_classes + pred))
|
||||
|
||||
# Start confusion computations
|
||||
if label_values[0] == 0 and label_values[-1] == num_classes - 1:
|
||||
|
||||
# Vectorized confusion
|
||||
vec_conf = np.bincount(true * num_classes + pred)
|
||||
|
||||
# Add possible missing values due to classes not being in pred or true
|
||||
# print(vec_conf.shape)
|
||||
if vec_conf.shape[0] < num_classes**2:
|
||||
vec_conf = np.pad(
|
||||
vec_conf, (0, num_classes**2 - vec_conf.shape[0]), "constant"
|
||||
)
|
||||
# print(vec_conf.shape)
|
||||
#print(vec_conf.shape)
|
||||
if vec_conf.shape[0] < num_classes ** 2:
|
||||
vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant')
|
||||
#print(vec_conf.shape)
|
||||
|
||||
# Reshape confusion in a matrix
|
||||
return vec_conf.reshape((num_classes, num_classes))
|
||||
|
||||
|
||||
else:
|
||||
|
||||
# Ensure no negative classes
|
||||
if label_values[0] < 0:
|
||||
raise ValueError("Unsupported negative classes")
|
||||
raise ValueError('Unsupported negative classes')
|
||||
|
||||
# Get the data in [0,num_classes[
|
||||
label_map = np.zeros((label_values[-1] + 1,), dtype=np.int32)
|
||||
|
@ -128,15 +112,12 @@ def fast_confusion(true, pred, label_values=None):
|
|||
vec_conf = np.bincount(true * num_classes + pred)
|
||||
|
||||
# Add possible missing values due to classes not being in pred or true
|
||||
if vec_conf.shape[0] < num_classes**2:
|
||||
vec_conf = np.pad(
|
||||
vec_conf, (0, num_classes**2 - vec_conf.shape[0]), "constant"
|
||||
)
|
||||
if vec_conf.shape[0] < num_classes ** 2:
|
||||
vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant')
|
||||
|
||||
# Reshape confusion in a matrix
|
||||
return vec_conf.reshape((num_classes, num_classes))
|
||||
|
||||
|
||||
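A small worked example of the bincount trick used above (here minlength plays the role of the explicit padding in the function):

import numpy as np

true = np.array([0, 0, 1, 2, 2, 2], dtype=np.int32)
pred = np.array([0, 1, 1, 2, 2, 0], dtype=np.int32)
num_classes = 3
vec_conf = np.bincount(true * num_classes + pred, minlength=num_classes ** 2)
conf = vec_conf.reshape((num_classes, num_classes))
# conf[i, j] counts samples with true label i predicted as j:
# [[1 1 0]
#  [0 1 0]
#  [1 0 2]]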
def metrics(confusions, ignore_unclassified=False):
|
||||
"""
|
||||
Computes different metrics from confusion matrices.
|
||||
|
@ -147,7 +128,7 @@ def metrics(confusions, ignore_unclassified=False):
|
|||
"""
|
||||
|
||||
# If the first class (often "unclassified") should be ignored, erase it from the confusion.
|
||||
if ignore_unclassified:
|
||||
if (ignore_unclassified):
|
||||
confusions[..., 0, :] = 0
|
||||
confusions[..., :, 0] = 0
|
||||
|
||||
|
@ -195,9 +176,7 @@ def smooth_metrics(confusions, smooth_n=0, ignore_unclassified=False):
|
|||
for epoch in range(confusions.shape[-3]):
|
||||
i0 = max(epoch - smooth_n, 0)
|
||||
i1 = min(epoch + smooth_n + 1, confusions.shape[-3])
|
||||
smoothed_confusions[..., epoch, :, :] = np.sum(
|
||||
confusions[..., i0:i1, :, :], axis=-3
|
||||
)
|
||||
smoothed_confusions[..., epoch, :, :] = np.sum(confusions[..., i0:i1, :, :], axis=-3)
|
||||
|
||||
# Compute TP, FP, FN. This assumes that the second to last axis counts the truths (like the first axis of a
|
||||
# confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix)
|
||||
|
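For reference, a hedged sketch of the per-class IoU computed from such a confusion matrix (the standard formula, not necessarily the exact IoU_from_confusions implementation):

import numpy as np

def iou_from_confusion(C, eps=1e-6):
    # C[..., i, j]: number of samples with true label i predicted as j
    TP = np.diagonal(C, axis1=-2, axis2=-1)
    FP = np.sum(C, axis=-2) - TP   # predicted as the class, but wrong
    FN = np.sum(C, axis=-1) - TP   # belonging to the class, but missed
    return TP / np.maximum(TP + FP + FN, eps)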
|
151
utils/ply.py
|
@ -28,29 +28,28 @@ import sys
|
|||
|
||||
|
||||
# Define PLY types
|
||||
ply_dtypes = dict(
|
||||
[
|
||||
(b"int8", "i1"),
|
||||
(b"char", "i1"),
|
||||
(b"uint8", "u1"),
|
||||
(b"uchar", "u1"),
|
||||
(b"int16", "i2"),
|
||||
(b"short", "i2"),
|
||||
(b"uint16", "u2"),
|
||||
(b"ushort", "u2"),
|
||||
(b"int32", "i4"),
|
||||
(b"int", "i4"),
|
||||
(b"uint32", "u4"),
|
||||
(b"uint", "u4"),
|
||||
(b"float32", "f4"),
|
||||
(b"float", "f4"),
|
||||
(b"float64", "f8"),
|
||||
(b"double", "f8"),
|
||||
]
|
||||
)
|
||||
ply_dtypes = dict([
|
||||
(b'int8', 'i1'),
|
||||
(b'char', 'i1'),
|
||||
(b'uint8', 'u1'),
|
||||
(b'uchar', 'u1'),
|
||||
(b'int16', 'i2'),
|
||||
(b'short', 'i2'),
|
||||
(b'uint16', 'u2'),
|
||||
(b'ushort', 'u2'),
|
||||
(b'int32', 'i4'),
|
||||
(b'int', 'i4'),
|
||||
(b'uint32', 'u4'),
|
||||
(b'uint', 'u4'),
|
||||
(b'float32', 'f4'),
|
||||
(b'float', 'f4'),
|
||||
(b'float64', 'f8'),
|
||||
(b'double', 'f8')
|
||||
])
|
||||
|
||||
# Numpy reader format
|
||||
valid_formats = {"ascii": "", "binary_big_endian": ">", "binary_little_endian": "<"}
|
||||
valid_formats = {'ascii': '', 'binary_big_endian': '>',
|
||||
'binary_little_endian': '<'}
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------
|
||||
|
@ -66,14 +65,14 @@ def parse_header(plyfile, ext):
|
|||
properties = []
|
||||
num_points = None
|
||||
|
||||
while b"end_header" not in line and line != b"":
|
||||
while b'end_header' not in line and line != b'':
|
||||
line = plyfile.readline()
|
||||
|
||||
if b"element" in line:
|
||||
if b'element' in line:
|
||||
line = line.split()
|
||||
num_points = int(line[2])
|
||||
|
||||
elif b"property" in line:
|
||||
elif b'property' in line:
|
||||
line = line.split()
|
||||
properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
|
||||
|
||||
|
@ -88,27 +87,28 @@ def parse_mesh_header(plyfile, ext):
|
|||
num_faces = None
|
||||
current_element = None
|
||||
|
||||
while b"end_header" not in line and line != b"":
|
||||
|
||||
while b'end_header' not in line and line != b'':
|
||||
line = plyfile.readline()
|
||||
|
||||
# Find point element
|
||||
if b"element vertex" in line:
|
||||
current_element = "vertex"
|
||||
if b'element vertex' in line:
|
||||
current_element = 'vertex'
|
||||
line = line.split()
|
||||
num_points = int(line[2])
|
||||
|
||||
elif b"element face" in line:
|
||||
current_element = "face"
|
||||
elif b'element face' in line:
|
||||
current_element = 'face'
|
||||
line = line.split()
|
||||
num_faces = int(line[2])
|
||||
|
||||
elif b"property" in line:
|
||||
if current_element == "vertex":
|
||||
elif b'property' in line:
|
||||
if current_element == 'vertex':
|
||||
line = line.split()
|
||||
vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
|
||||
elif current_element == "vertex":
|
||||
if not line.startswith("property list uchar int"):
|
||||
raise ValueError("Unsupported faces property : " + line)
|
||||
elif current_element == 'vertex':
|
||||
if not line.startswith('property list uchar int'):
|
||||
raise ValueError('Unsupported faces property : ' + line)
|
||||
|
||||
return num_points, num_faces, vertex_properties
|
||||
|
||||
|
@ -150,21 +150,24 @@ def read_ply(filename, triangular_mesh=False):
|
|||
|
||||
"""
|
||||
|
||||
with open(filename, "rb") as plyfile:
|
||||
with open(filename, 'rb') as plyfile:
|
||||
|
||||
|
||||
# Check if the file start with ply
|
||||
if b"ply" not in plyfile.readline():
|
||||
raise ValueError("The file does not start whith the word ply")
|
||||
if b'ply' not in plyfile.readline():
|
||||
raise ValueError('The file does not start with the word ply')
|
||||
|
||||
# get binary_little/big or ascii
|
||||
fmt = plyfile.readline().split()[1].decode()
|
||||
if fmt == "ascii":
|
||||
raise ValueError("The file is not binary")
|
||||
raise ValueError('The file is not binary')
|
||||
|
||||
# get extension for building the numpy dtypes
|
||||
ext = valid_formats[fmt]
|
||||
|
||||
# PointCloud reader vs mesh reader
|
||||
if triangular_mesh:
|
||||
|
||||
# Parse header
|
||||
num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
|
||||
|
||||
|
@ -172,19 +175,18 @@ def read_ply(filename, triangular_mesh=False):
|
|||
vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)
|
||||
|
||||
# Get face data
|
||||
face_properties = [
|
||||
("k", ext + "u1"),
|
||||
("v1", ext + "i4"),
|
||||
("v2", ext + "i4"),
|
||||
("v3", ext + "i4"),
|
||||
]
|
||||
face_properties = [('k', ext + 'u1'),
|
||||
('v1', ext + 'i4'),
|
||||
('v2', ext + 'i4'),
|
||||
('v3', ext + 'i4')]
|
||||
faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)
|
||||
|
||||
# Return vertex data and concatenated faces
|
||||
faces = np.vstack((faces_data["v1"], faces_data["v2"], faces_data["v3"])).T
|
||||
faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T
|
||||
data = [vertex_data, faces]
|
||||
|
||||
else:
|
||||
|
||||
# Parse header
|
||||
num_points, properties = parse_header(plyfile, ext)
|
||||
|
||||
|
@ -195,17 +197,18 @@ def read_ply(filename, triangular_mesh=False):
|
|||
|
||||
|
||||
def header_properties(field_list, field_names):
|
||||
|
||||
# List of lines to write
|
||||
lines = []
|
||||
|
||||
# First line describing element vertex
|
||||
lines.append("element vertex %d" % field_list[0].shape[0])
|
||||
lines.append('element vertex %d' % field_list[0].shape[0])
|
||||
|
||||
# Properties lines
|
||||
i = 0
|
||||
for fields in field_list:
|
||||
for field in fields.T:
|
||||
lines.append("property %s %s" % (field.dtype.name, field_names[i]))
|
||||
lines.append('property %s %s' % (field.dtype.name, field_names[i]))
|
||||
i += 1
|
||||
|
||||
return lines
|
||||
|
@ -245,59 +248,57 @@ def write_ply(filename, field_list, field_names, triangular_faces=None):
|
|||
"""
|
||||
|
||||
# Format list input to the right form
|
||||
field_list = (
|
||||
list(field_list)
|
||||
if (type(field_list) == list or type(field_list) == tuple)
|
||||
else list((field_list,))
|
||||
)
|
||||
field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,))
|
||||
for i, field in enumerate(field_list):
|
||||
if field.ndim < 2:
|
||||
field_list[i] = field.reshape(-1, 1)
|
||||
if field.ndim > 2:
|
||||
print("fields have more than 2 dimensions")
|
||||
print('fields have more than 2 dimensions')
|
||||
return False
|
||||
|
||||
# check all fields have the same number of data
|
||||
n_points = [field.shape[0] for field in field_list]
|
||||
if not np.all(np.equal(n_points, n_points[0])):
|
||||
print("wrong field dimensions")
|
||||
print('wrong field dimensions')
|
||||
return False
|
||||
|
||||
# Check if field_names and field_list have same nb of column
|
||||
n_fields = np.sum([field.shape[1] for field in field_list])
|
||||
if n_fields != len(field_names):
|
||||
print("wrong number of field names")
|
||||
if (n_fields != len(field_names)):
|
||||
print('wrong number of field names')
|
||||
return False
|
||||
|
||||
# Add extension if not there
|
||||
if not filename.endswith(".ply"):
|
||||
filename += ".ply"
|
||||
if not filename.endswith('.ply'):
|
||||
filename += '.ply'
|
||||
|
||||
# open in text mode to write the header
|
||||
with open(filename, "w") as plyfile:
|
||||
with open(filename, 'w') as plyfile:
|
||||
|
||||
# First magical word
|
||||
header = ["ply"]
|
||||
header = ['ply']
|
||||
|
||||
# Encoding format
|
||||
header.append("format binary_" + sys.byteorder + "_endian 1.0")
|
||||
header.append('format binary_' + sys.byteorder + '_endian 1.0')
|
||||
|
||||
# Points properties description
|
||||
header.extend(header_properties(field_list, field_names))
|
||||
|
||||
# Add faces if needed
|
||||
if triangular_faces is not None:
|
||||
header.append("element face {:d}".format(triangular_faces.shape[0]))
|
||||
header.append("property list uchar int vertex_indices")
|
||||
header.append('element face {:d}'.format(triangular_faces.shape[0]))
|
||||
header.append('property list uchar int vertex_indices')
|
||||
|
||||
# End of header
|
||||
header.append("end_header")
|
||||
header.append('end_header')
|
||||
|
||||
# Write all lines
|
||||
for line in header:
|
||||
plyfile.write("%s\n" % line)
|
||||
|
||||
# open in binary/append to use tofile
|
||||
with open(filename, "ab") as plyfile:
|
||||
with open(filename, 'ab') as plyfile:
|
||||
|
||||
# Create a structured array
|
||||
i = 0
|
||||
type_list = []
|
||||
|
@ -316,19 +317,19 @@ def write_ply(filename, field_list, field_names, triangular_faces=None):
|
|||
|
||||
if triangular_faces is not None:
|
||||
triangular_faces = triangular_faces.astype(np.int32)
|
||||
type_list = [("k", "uint8")] + [(str(ind), "int32") for ind in range(3)]
|
||||
type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)]
|
||||
data = np.empty(triangular_faces.shape[0], dtype=type_list)
|
||||
data["k"] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)
|
||||
data["0"] = triangular_faces[:, 0]
|
||||
data["1"] = triangular_faces[:, 1]
|
||||
data["2"] = triangular_faces[:, 2]
|
||||
data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)
|
||||
data['0'] = triangular_faces[:, 0]
|
||||
data['1'] = triangular_faces[:, 1]
|
||||
data['2'] = triangular_faces[:, 2]
|
||||
data.tofile(plyfile)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
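A typical call to write_ply as defined above (array contents are illustrative):

import numpy as np

points = np.random.rand(1000, 3).astype(np.float32)
colors = (np.random.rand(1000, 3) * 255).astype(np.uint8)
write_ply('example_cloud.ply', [points, colors], ['x', 'y', 'z', 'red', 'green', 'blue'])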
def describe_element(name, df):
|
||||
"""Takes the columns of the dataframe and builds a ply-like description
|
||||
""" Takes the columns of the dataframe and builds a ply-like description
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
@ -339,16 +340,16 @@ def describe_element(name, df):
|
|||
-------
|
||||
element: list[str]
|
||||
"""
|
||||
property_formats = {"f": "float", "u": "uchar", "i": "int"}
|
||||
element = ["element " + name + " " + str(len(df))]
|
||||
property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
|
||||
element = ['element ' + name + ' ' + str(len(df))]
|
||||
|
||||
if name == "face":
|
||||
if name == 'face':
|
||||
element.append("property list uchar int points_indices")
|
||||
|
||||
else:
|
||||
for i in range(len(df.columns)):
|
||||
# get first letter of dtype to infer format
|
||||
f = property_formats[str(df.dtypes[i])[0]]
|
||||
element.append("property " + f + " " + df.columns.values[i])
|
||||
element.append('property ' + f + ' ' + df.columns.values[i])
|
||||
|
||||
return element
|
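For reference, a minimal sketch of how the write_ply / describe_element utilities above are meant to be called; the arrays and field names below are illustrative, not taken from this diff.

import numpy as np
from utils.ply import write_ply

# Hypothetical cloud: 100 points with one integer label per point
points = np.random.rand(100, 3).astype(np.float32)      # columns x, y, z
labels = np.random.randint(0, 5, 100).astype(np.int32)  # one extra field

# write_ply checks that the number of field names matches the total number
# of columns (3 + 1 here), appends '.ply' if missing, writes the ASCII
# header, then dumps the data as a binary structured array.
write_ply('example_cloud', [points, labels], ['x', 'y', 'z', 'labels'])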
544
utils/tester.py
|
@ -24,18 +24,22 @@
|
|||
|
||||
# Basic libs
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import numpy as np
|
||||
from os import makedirs
|
||||
from os import makedirs, listdir
|
||||
from os.path import exists, join
|
||||
import time
|
||||
import json
|
||||
from sklearn.neighbors import KDTree
|
||||
|
||||
# PLY reader
|
||||
from utils.ply import write_ply
|
||||
from utils.ply import read_ply, write_ply
|
||||
|
||||
# Metrics
|
||||
from utils.metrics import IoU_from_confusions, fast_confusion
|
||||
from sklearn.metrics import confusion_matrix
|
||||
|
||||
# from utils.visualizer import show_ModelNet_models
|
||||
#from utils.visualizer import show_ModelNet_models
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------
|
||||
#
|
||||
|
@ -45,10 +49,12 @@ from utils.metrics import IoU_from_confusions, fast_confusion
|
|||
|
||||
|
||||
class ModelTester:
|
||||
|
||||
# Initialization methods
|
||||
# ------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
def __init__(self, net, chkp_path=None, on_gpu=True):
|
||||
|
||||
############
|
||||
# Parameters
|
||||
############
|
||||
|
@ -65,8 +71,8 @@ class ModelTester:
|
|||
##########################
|
||||
|
||||
checkpoint = torch.load(chkp_path)
|
||||
net.load_state_dict(checkpoint["model_state_dict"])
|
||||
self.epoch = checkpoint["epoch"]
|
||||
net.load_state_dict(checkpoint['model_state_dict'])
|
||||
self.epoch = checkpoint['epoch']
|
||||
net.eval()
|
||||
print("Model and training state restored.")
|
||||
|
||||
|
@ -76,6 +82,7 @@ class ModelTester:
|
|||
# ------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
def classification_test(self, net, test_loader, config, num_votes=100, debug=False):
|
||||
|
||||
############
|
||||
# Initialize
|
||||
############
|
||||
|
@ -84,6 +91,7 @@ class ModelTester:
|
|||
softmax = torch.nn.Softmax(1)
|
||||
|
||||
# Number of classes including ignored labels
|
||||
nc_tot = test_loader.dataset.num_classes
|
||||
|
||||
# Number of classes predicted by the model
|
||||
nc_model = config.num_classes
|
||||
|
@ -96,6 +104,7 @@ class ModelTester:
|
|||
mean_dt = np.zeros(1)
|
||||
last_display = time.time()
|
||||
while np.min(self.test_counts) < num_votes:
|
||||
|
||||
# Run model on all test examples
|
||||
# ******************************
|
||||
|
||||
|
@ -106,11 +115,12 @@ class ModelTester:
|
|||
|
||||
# Start validation loop
|
||||
for batch in test_loader:
|
||||
|
||||
# New time
|
||||
t = t[-1:]
|
||||
t += [time.time()]
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
batch.to(self.device)
|
||||
|
||||
# Forward pass
|
||||
|
@ -121,7 +131,7 @@ class ModelTester:
|
|||
targets += [batch.labels.cpu().numpy()]
|
||||
obj_inds += [batch.model_inds.cpu().numpy()]
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
torch.cuda.synchronize(self.device)
|
||||
|
||||
# Average timing
|
||||
|
@ -131,28 +141,22 @@ class ModelTester:
|
|||
# Display
|
||||
if (t[-1] - last_display) > 1.0:
|
||||
last_display = t[-1]
|
||||
message = "Test vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})"
|
||||
print(
|
||||
message.format(
|
||||
np.min(self.test_counts),
|
||||
message = 'Test vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})'
|
||||
print(message.format(np.min(self.test_counts),
|
||||
100 * len(obj_inds) / config.validation_size,
|
||||
1000 * (mean_dt[0]),
|
||||
1000 * (mean_dt[1]),
|
||||
)
|
||||
)
|
||||
1000 * (mean_dt[1])))
|
||||
# Stack all validation predictions
|
||||
probs = np.vstack(probs)
|
||||
targets = np.hstack(targets)
|
||||
obj_inds = np.hstack(obj_inds)
|
||||
|
||||
if np.any(test_loader.dataset.input_labels[obj_inds] != targets):
|
||||
raise ValueError("wrong object indices")
|
||||
raise ValueError('wrong object indices')
|
||||
|
||||
# Compute incremental average (predictions are always ordered)
|
||||
self.test_counts[obj_inds] += 1
|
||||
self.test_probs[obj_inds] += (probs - self.test_probs[obj_inds]) / (
|
||||
self.test_counts[obj_inds]
|
||||
)
|
||||
self.test_probs[obj_inds] += (probs - self.test_probs[obj_inds]) / (self.test_counts[obj_inds])
|
||||
|
||||
# Save/Display temporary results
|
||||
# ******************************
|
||||
|
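The vote accumulation a few lines above (test_counts incremented, then (probs - test_probs) / test_counts added) is the standard incremental mean: after n votes the stored value equals the exact average of all n predictions. A tiny self-contained check of that identity, with made-up numbers:

import numpy as np

votes = np.array([0.2, 0.8, 0.5, 0.9])  # hypothetical per-vote probabilities for one object

running_mean, count = 0.0, 0
for x in votes:
    count += 1
    running_mean += (x - running_mean) / count  # same update as test_probs above

assert np.isclose(running_mean, votes.mean())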
@ -160,20 +164,16 @@ class ModelTester:
|
|||
test_labels = np.array(test_loader.dataset.label_values)
|
||||
|
||||
# Compute classification results
|
||||
C1 = fast_confusion(
|
||||
test_loader.dataset.input_labels,
|
||||
C1 = fast_confusion(test_loader.dataset.input_labels,
|
||||
np.argmax(self.test_probs, axis=1),
|
||||
test_labels,
|
||||
)
|
||||
test_labels)
|
||||
|
||||
ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)
|
||||
print("Test Accuracy = {:.1f}%".format(ACC))
|
||||
print('Test Accuracy = {:.1f}%'.format(ACC))
|
||||
|
||||
return
|
||||
|
||||
def cloud_segmentation_test(
|
||||
self, net, test_loader, config, num_votes=100, debug=False
|
||||
):
|
||||
def cloud_segmentation_test(self, net, test_loader, config, num_votes=100, debug=False):
|
||||
"""
|
||||
Test method for cloud segmentation models
|
||||
"""
|
||||
|
@ -188,41 +188,36 @@ class ModelTester:
|
|||
softmax = torch.nn.Softmax(1)
|
||||
|
||||
# Number of classes including ignored labels
|
||||
nc_tot = test_loader.dataset.num_classes
|
||||
|
||||
# Number of classes predicted by the model
|
||||
nc_model = config.num_classes
|
||||
|
||||
# Initiate global prediction over test clouds
|
||||
self.test_probs = [
|
||||
np.zeros((l.shape[0], nc_model)) for l in test_loader.dataset.input_labels
|
||||
]
|
||||
self.test_probs = [np.zeros((l.shape[0], nc_model)) for l in test_loader.dataset.input_labels]
|
||||
|
||||
# Test saving path
|
||||
if config.saving:
|
||||
test_path = join("test", config.saving_path.split("/")[-1])
|
||||
test_path = join('test', config.saving_path.split('/')[-1])
|
||||
if not exists(test_path):
|
||||
makedirs(test_path)
|
||||
if not exists(join(test_path, "predictions")):
|
||||
makedirs(join(test_path, "predictions"))
|
||||
if not exists(join(test_path, "probs")):
|
||||
makedirs(join(test_path, "probs"))
|
||||
if not exists(join(test_path, "potentials")):
|
||||
makedirs(join(test_path, "potentials"))
|
||||
if not exists(join(test_path, 'predictions')):
|
||||
makedirs(join(test_path, 'predictions'))
|
||||
if not exists(join(test_path, 'probs')):
|
||||
makedirs(join(test_path, 'probs'))
|
||||
if not exists(join(test_path, 'potentials')):
|
||||
makedirs(join(test_path, 'potentials'))
|
||||
else:
|
||||
test_path = None
|
||||
|
||||
# If on validation directly compute score
|
||||
if test_loader.dataset.set == "validation":
|
||||
if test_loader.dataset.set == 'validation':
|
||||
val_proportions = np.zeros(nc_model, dtype=np.float32)
|
||||
i = 0
|
||||
for label_value in test_loader.dataset.label_values:
|
||||
if label_value not in test_loader.dataset.ignored_labels:
|
||||
val_proportions[i] = np.sum(
|
||||
[
|
||||
np.sum(labels == label_value)
|
||||
for labels in test_loader.dataset.validation_labels
|
||||
]
|
||||
)
|
||||
val_proportions[i] = np.sum([np.sum(labels == label_value)
|
||||
for labels in test_loader.dataset.validation_labels])
|
||||
i += 1
|
||||
else:
|
||||
val_proportions = None
|
||||
|
@ -240,16 +235,17 @@ class ModelTester:
|
|||
|
||||
# Start test loop
|
||||
while True:
|
||||
print("Initialize workers")
|
||||
print('Initialize workers')
|
||||
for i, batch in enumerate(test_loader):
|
||||
|
||||
# New time
|
||||
t = t[-1:]
|
||||
t += [time.time()]
|
||||
|
||||
if i == 0:
|
||||
print("Done in {:.1f}s".format(t[1] - t[0]))
|
||||
print('Done in {:.1f}s'.format(t[1] - t[0]))
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
batch.to(self.device)
|
||||
|
||||
# Forward pass
|
||||
|
@ -270,25 +266,20 @@ class ModelTester:
|
|||
|
||||
i0 = 0
|
||||
for b_i, length in enumerate(lengths):
|
||||
|
||||
# Get prediction
|
||||
points = s_points[i0 : i0 + length]
|
||||
probs = stacked_probs[i0 : i0 + length]
|
||||
inds = in_inds[i0 : i0 + length]
|
||||
points = s_points[i0:i0 + length]
|
||||
probs = stacked_probs[i0:i0 + length]
|
||||
inds = in_inds[i0:i0 + length]
|
||||
c_i = cloud_inds[b_i]
|
||||
|
||||
if 0 < test_radius_ratio < 1:
|
||||
mask = (
|
||||
np.sum(points**2, axis=1)
|
||||
< (test_radius_ratio * config.in_radius) ** 2
|
||||
)
|
||||
mask = np.sum(points ** 2, axis=1) < (test_radius_ratio * config.in_radius) ** 2
|
||||
inds = inds[mask]
|
||||
probs = probs[mask]
|
||||
|
||||
# Update current probs in whole cloud
|
||||
self.test_probs[c_i][inds] = (
|
||||
test_smooth * self.test_probs[c_i][inds]
|
||||
+ (1 - test_smooth) * probs
|
||||
)
|
||||
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs
|
||||
i0 += length
|
||||
|
||||
# Average timing
|
||||
|
@ -301,69 +292,50 @@ class ModelTester:
|
|||
# Display
|
||||
if (t[-1] - last_display) > 1.0:
|
||||
last_display = t[-1]
|
||||
message = (
|
||||
"e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})"
|
||||
)
|
||||
print(
|
||||
message.format(
|
||||
test_epoch,
|
||||
i,
|
||||
message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})'
|
||||
print(message.format(test_epoch, i,
|
||||
100 * i / config.validation_size,
|
||||
1000 * (mean_dt[0]),
|
||||
1000 * (mean_dt[1]),
|
||||
1000 * (mean_dt[2]),
|
||||
)
|
||||
)
|
||||
1000 * (mean_dt[2])))
|
||||
|
||||
# Update minimum of potentials
|
||||
new_min = torch.min(test_loader.dataset.min_potentials)
|
||||
print(
|
||||
"Test epoch {:d}, end. Min potential = {:.1f}".format(
|
||||
test_epoch, new_min
|
||||
)
|
||||
)
|
||||
# print([np.mean(pots) for pots in test_loader.dataset.potentials])
|
||||
print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min))
|
||||
#print([np.mean(pots) for pots in test_loader.dataset.potentials])
|
||||
|
||||
# Save predicted cloud
|
||||
if last_min + 1 < new_min:
|
||||
|
||||
# Update last_min
|
||||
last_min += 1
|
||||
|
||||
# Show vote results (computed on sub-clouds, so these are not the final values)
|
||||
if test_loader.dataset.set == "validation":
|
||||
print("\nConfusion on sub clouds")
|
||||
if test_loader.dataset.set == 'validation':
|
||||
print('\nConfusion on sub clouds')
|
||||
Confs = []
|
||||
for i, file_path in enumerate(test_loader.dataset.files):
|
||||
|
||||
# Insert false columns for ignored labels
|
||||
probs = np.array(self.test_probs[i], copy=True)
|
||||
for l_ind, label_value in enumerate(
|
||||
test_loader.dataset.label_values
|
||||
):
|
||||
for l_ind, label_value in enumerate(test_loader.dataset.label_values):
|
||||
if label_value in test_loader.dataset.ignored_labels:
|
||||
probs = np.insert(probs, l_ind, 0, axis=1)
|
||||
|
||||
# Predicted labels
|
||||
preds = test_loader.dataset.label_values[
|
||||
np.argmax(probs, axis=1)
|
||||
].astype(np.int32)
|
||||
preds = test_loader.dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32)
|
||||
|
||||
# Targets
|
||||
targets = test_loader.dataset.input_labels[i]
|
||||
|
||||
# Confs
|
||||
Confs += [
|
||||
fast_confusion(
|
||||
targets, preds, test_loader.dataset.label_values
|
||||
)
|
||||
]
|
||||
Confs += [fast_confusion(targets, preds, test_loader.dataset.label_values)]
|
||||
|
||||
# Regroup confusions
|
||||
C = np.sum(np.stack(Confs), axis=0).astype(np.float32)
|
||||
|
||||
# Remove ignored labels from confusions
|
||||
for l_ind, label_value in reversed(
|
||||
list(enumerate(test_loader.dataset.label_values))
|
||||
):
|
||||
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))):
|
||||
if label_value in test_loader.dataset.ignored_labels:
|
||||
C = np.delete(C, l_ind, axis=0)
|
||||
C = np.delete(C, l_ind, axis=1)
|
||||
|
@ -374,18 +346,20 @@ class ModelTester:
|
|||
# Compute IoUs
|
||||
IoUs = IoU_from_confusions(C)
|
||||
mIoU = np.mean(IoUs)
|
||||
s = "{:5.2f} | ".format(100 * mIoU)
|
||||
s = '{:5.2f} | '.format(100 * mIoU)
|
||||
for IoU in IoUs:
|
||||
s += "{:5.2f} ".format(100 * IoU)
|
||||
print(s + "\n")
|
||||
s += '{:5.2f} '.format(100 * IoU)
|
||||
print(s + '\n')
|
||||
|
||||
# Save real IoU once in a while
|
||||
if int(np.ceil(new_min)) % 10 == 0:
|
||||
|
||||
# Project predictions
|
||||
print("\nReproject Vote #{:d}".format(int(np.floor(new_min))))
|
||||
print('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
|
||||
t1 = time.time()
|
||||
proj_probs = []
|
||||
for i, file_path in enumerate(test_loader.dataset.files):
|
||||
|
||||
# print(i, file_path, test_loader.dataset.test_proj[i].shape, self.test_probs[i].shape)
|
||||
|
||||
# print(test_loader.dataset.test_proj[i].dtype, np.max(test_loader.dataset.test_proj[i]))
|
||||
|
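IoU_from_confusions (imported from utils.metrics) is used above but its body is not part of this diff; per class, the IoU can be read directly off a confusion matrix as TP / (TP + FP + FN). A hedged sketch of that computation, which should match the imported helper up to epsilon and axis conventions:

import numpy as np

def iou_from_confusion(C):
    # C[i, j] = number of points with true class i predicted as class j
    TP = np.diag(C).astype(np.float64)
    FP = C.sum(axis=0) - TP
    FN = C.sum(axis=1) - TP
    return TP / (TP + FP + FN + 1e-6)

C = np.array([[50, 2, 0],
              [3, 40, 5],
              [1, 4, 45]])
print(iou_from_confusion(C))   # one IoU value per class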
@ -396,116 +370,90 @@ class ModelTester:
|
|||
proj_probs += [probs]
|
||||
|
||||
# Insert false columns for ignored labels
|
||||
for l_ind, label_value in enumerate(
|
||||
test_loader.dataset.label_values
|
||||
):
|
||||
for l_ind, label_value in enumerate(test_loader.dataset.label_values):
|
||||
if label_value in test_loader.dataset.ignored_labels:
|
||||
proj_probs[i] = np.insert(
|
||||
proj_probs[i], l_ind, 0, axis=1
|
||||
)
|
||||
proj_probs[i] = np.insert(proj_probs[i], l_ind, 0, axis=1)
|
||||
|
||||
t2 = time.time()
|
||||
print("Done in {:.1f} s\n".format(t2 - t1))
|
||||
print('Done in {:.1f} s\n'.format(t2 - t1))
|
||||
|
||||
# Show vote results
|
||||
if test_loader.dataset.set == "validation":
|
||||
print("Confusion on full clouds")
|
||||
if test_loader.dataset.set == 'validation':
|
||||
print('Confusion on full clouds')
|
||||
t1 = time.time()
|
||||
Confs = []
|
||||
for i, file_path in enumerate(test_loader.dataset.files):
|
||||
|
||||
# Get the predicted labels
|
||||
preds = test_loader.dataset.label_values[
|
||||
np.argmax(proj_probs[i], axis=1)
|
||||
].astype(np.int32)
|
||||
preds = test_loader.dataset.label_values[np.argmax(proj_probs[i], axis=1)].astype(np.int32)
|
||||
|
||||
# Confusion
|
||||
targets = test_loader.dataset.validation_labels[i]
|
||||
Confs += [
|
||||
fast_confusion(
|
||||
targets, preds, test_loader.dataset.label_values
|
||||
)
|
||||
]
|
||||
Confs += [fast_confusion(targets, preds, test_loader.dataset.label_values)]
|
||||
|
||||
t2 = time.time()
|
||||
print("Done in {:.1f} s\n".format(t2 - t1))
|
||||
print('Done in {:.1f} s\n'.format(t2 - t1))
|
||||
|
||||
# Regroup confusions
|
||||
C = np.sum(np.stack(Confs), axis=0)
|
||||
|
||||
# Remove ignored labels from confusions
|
||||
for l_ind, label_value in reversed(
|
||||
list(enumerate(test_loader.dataset.label_values))
|
||||
):
|
||||
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))):
|
||||
if label_value in test_loader.dataset.ignored_labels:
|
||||
C = np.delete(C, l_ind, axis=0)
|
||||
C = np.delete(C, l_ind, axis=1)
|
||||
|
||||
IoUs = IoU_from_confusions(C)
|
||||
mIoU = np.mean(IoUs)
|
||||
s = "{:5.2f} | ".format(100 * mIoU)
|
||||
s = '{:5.2f} | '.format(100 * mIoU)
|
||||
for IoU in IoUs:
|
||||
s += "{:5.2f} ".format(100 * IoU)
|
||||
print("-" * len(s))
|
||||
s += '{:5.2f} '.format(100 * IoU)
|
||||
print('-' * len(s))
|
||||
print(s)
|
||||
print("-" * len(s) + "\n")
|
||||
print('-' * len(s) + '\n')
|
||||
|
||||
# Save predictions
|
||||
print("Saving clouds")
|
||||
print('Saving clouds')
|
||||
t1 = time.time()
|
||||
for i, file_path in enumerate(test_loader.dataset.files):
|
||||
|
||||
# Get file
|
||||
points = test_loader.dataset.load_evaluation_points(file_path)
|
||||
|
||||
# Get the predicted labels
|
||||
preds = test_loader.dataset.label_values[
|
||||
np.argmax(proj_probs[i], axis=1)
|
||||
].astype(np.int32)
|
||||
preds = test_loader.dataset.label_values[np.argmax(proj_probs[i], axis=1)].astype(np.int32)
|
||||
|
||||
# Save plys
|
||||
cloud_name = file_path.split("/")[-1]
|
||||
test_name = join(test_path, "predictions", cloud_name)
|
||||
write_ply(test_name, [points, preds], ["x", "y", "z", "preds"])
|
||||
test_name2 = join(test_path, "probs", cloud_name)
|
||||
prob_names = [
|
||||
"_".join(test_loader.dataset.label_to_names[label].split())
|
||||
for label in test_loader.dataset.label_values
|
||||
]
|
||||
write_ply(
|
||||
test_name2,
|
||||
cloud_name = file_path.split('/')[-1]
|
||||
test_name = join(test_path, 'predictions', cloud_name)
|
||||
write_ply(test_name,
|
||||
[points, preds],
|
||||
['x', 'y', 'z', 'preds'])
|
||||
test_name2 = join(test_path, 'probs', cloud_name)
|
||||
prob_names = ['_'.join(test_loader.dataset.label_to_names[label].split())
|
||||
for label in test_loader.dataset.label_values]
|
||||
write_ply(test_name2,
|
||||
[points, proj_probs[i]],
|
||||
["x", "y", "z"] + prob_names,
|
||||
)
|
||||
['x', 'y', 'z'] + prob_names)
|
||||
|
||||
# Save potentials
|
||||
pot_points = np.array(
|
||||
test_loader.dataset.pot_trees[i].data, copy=False
|
||||
)
|
||||
pot_name = join(test_path, "potentials", cloud_name)
|
||||
pots = (
|
||||
test_loader.dataset.potentials[i].numpy().astype(np.float32)
|
||||
)
|
||||
write_ply(
|
||||
pot_name,
|
||||
pot_points = np.array(test_loader.dataset.pot_trees[i].data, copy=False)
|
||||
pot_name = join(test_path, 'potentials', cloud_name)
|
||||
pots = test_loader.dataset.potentials[i].numpy().astype(np.float32)
|
||||
write_ply(pot_name,
|
||||
[pot_points.astype(np.float32), pots],
|
||||
["x", "y", "z", "pots"],
|
||||
)
|
||||
['x', 'y', 'z', 'pots'])
|
||||
|
||||
# Save ascii preds
|
||||
if test_loader.dataset.set == "test":
|
||||
if test_loader.dataset.name.startswith("Semantic3D"):
|
||||
ascii_name = join(
|
||||
test_path,
|
||||
"predictions",
|
||||
test_loader.dataset.ascii_files[cloud_name],
|
||||
)
|
||||
if test_loader.dataset.set == 'test':
|
||||
if test_loader.dataset.name.startswith('Semantic3D'):
|
||||
ascii_name = join(test_path, 'predictions', test_loader.dataset.ascii_files[cloud_name])
|
||||
else:
|
||||
ascii_name = join(
|
||||
test_path, "predictions", cloud_name[:-4] + ".txt"
|
||||
)
|
||||
np.savetxt(ascii_name, preds, fmt="%d")
|
||||
ascii_name = join(test_path, 'predictions', cloud_name[:-4] + '.txt')
|
||||
np.savetxt(ascii_name, preds, fmt='%d')
|
||||
|
||||
t2 = time.time()
|
||||
print("Done in {:.1f} s\n".format(t2 - t1))
|
||||
print('Done in {:.1f} s\n'.format(t2 - t1))
|
||||
|
||||
test_epoch += 1
|
||||
|
||||
|
@ -515,9 +463,7 @@ class ModelTester:
|
|||
|
||||
return
|
||||
|
||||
def slam_segmentation_test(
|
||||
self, net, test_loader, config, num_votes=100, debug=True
|
||||
):
|
||||
def slam_segmentation_test(self, net, test_loader, config, num_votes=100, debug=True):
|
||||
"""
|
||||
Test method for slam segmentation models
|
||||
"""
|
||||
|
@ -539,31 +485,29 @@ class ModelTester:
|
|||
test_path = None
|
||||
report_path = None
|
||||
if config.saving:
|
||||
test_path = join("test", config.saving_path.split("/")[-1])
|
||||
test_path = join('test', config.saving_path.split('/')[-1])
|
||||
if not exists(test_path):
|
||||
makedirs(test_path)
|
||||
report_path = join(test_path, "reports")
|
||||
report_path = join(test_path, 'reports')
|
||||
if not exists(report_path):
|
||||
makedirs(report_path)
|
||||
|
||||
if test_loader.dataset.set == "validation":
|
||||
for folder in ["val_predictions", "val_probs"]:
|
||||
if test_loader.dataset.set == 'validation':
|
||||
for folder in ['val_predictions', 'val_probs']:
|
||||
if not exists(join(test_path, folder)):
|
||||
makedirs(join(test_path, folder))
|
||||
else:
|
||||
for folder in ["predictions", "probs"]:
|
||||
for folder in ['predictions', 'probs']:
|
||||
if not exists(join(test_path, folder)):
|
||||
makedirs(join(test_path, folder))
|
||||
|
||||
# Init validation container
|
||||
all_f_preds = []
|
||||
all_f_labels = []
|
||||
if test_loader.dataset.set == "validation":
|
||||
if test_loader.dataset.set == 'validation':
|
||||
for i, seq_frames in enumerate(test_loader.dataset.frames):
|
||||
all_f_preds.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames])
|
||||
all_f_labels.append(
|
||||
[np.zeros((0,), dtype=np.int32) for _ in seq_frames]
|
||||
)
|
||||
all_f_labels.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames])
|
||||
|
||||
#####################
|
||||
# Network predictions
|
||||
|
@ -579,16 +523,17 @@ class ModelTester:
|
|||
|
||||
# Start test loop
|
||||
while True:
|
||||
print("Initialize workers")
|
||||
print('Initialize workers')
|
||||
for i, batch in enumerate(test_loader):
|
||||
|
||||
# New time
|
||||
t = t[-1:]
|
||||
t += [time.time()]
|
||||
|
||||
if i == 0:
|
||||
print("Done in {:.1f}s".format(t[1] - t[0]))
|
||||
print('Done in {:.1f}s'.format(t[1] - t[0]))
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
batch.to(self.device)
|
||||
|
||||
# Forward pass
|
||||
|
@ -610,8 +555,9 @@ class ModelTester:
|
|||
|
||||
i0 = 0
|
||||
for b_i, length in enumerate(lengths):
|
||||
|
||||
# Get prediction
|
||||
probs = stk_probs[i0 : i0 + length]
|
||||
probs = stk_probs[i0:i0 + length]
|
||||
proj_inds = r_inds_list[b_i]
|
||||
proj_mask = r_mask_list[b_i]
|
||||
frame_labels = labels_list[b_i]
|
||||
|
@ -627,151 +573,97 @@ class ModelTester:
|
|||
|
||||
# Save probs in a binary file (uint8 format for lighter weight)
|
||||
seq_name = test_loader.dataset.sequences[s_ind]
|
||||
if test_loader.dataset.set == "validation":
|
||||
folder = "val_probs"
|
||||
pred_folder = "val_predictions"
|
||||
if test_loader.dataset.set == 'validation':
|
||||
folder = 'val_probs'
|
||||
pred_folder = 'val_predictions'
|
||||
else:
|
||||
folder = "probs"
|
||||
pred_folder = "predictions"
|
||||
filename = "{:s}_{:07d}.npy".format(seq_name, f_ind)
|
||||
folder = 'probs'
|
||||
pred_folder = 'predictions'
|
||||
filename = '{:s}_{:07d}.npy'.format(seq_name, f_ind)
|
||||
filepath = join(test_path, folder, filename)
|
||||
if exists(filepath):
|
||||
frame_probs_uint8 = np.load(filepath)
|
||||
else:
|
||||
frame_probs_uint8 = np.zeros(
|
||||
(proj_mask.shape[0], nc_model), dtype=np.uint8
|
||||
)
|
||||
frame_probs = (
|
||||
frame_probs_uint8[proj_mask, :].astype(np.float32) / 255
|
||||
)
|
||||
frame_probs = (
|
||||
test_smooth * frame_probs + (1 - test_smooth) * proj_probs
|
||||
)
|
||||
frame_probs_uint8[proj_mask, :] = (frame_probs * 255).astype(
|
||||
np.uint8
|
||||
)
|
||||
frame_probs_uint8 = np.zeros((proj_mask.shape[0], nc_model), dtype=np.uint8)
|
||||
frame_probs = frame_probs_uint8[proj_mask, :].astype(np.float32) / 255
|
||||
frame_probs = test_smooth * frame_probs + (1 - test_smooth) * proj_probs
|
||||
frame_probs_uint8[proj_mask, :] = (frame_probs * 255).astype(np.uint8)
|
||||
np.save(filepath, frame_probs_uint8)
|
||||
|
||||
# Save some predictions in ply format for visualization
|
||||
if test_loader.dataset.set == "validation":
|
||||
if test_loader.dataset.set == 'validation':
|
||||
|
||||
# Insert false columns for ignored labels
|
||||
frame_probs_uint8_bis = frame_probs_uint8.copy()
|
||||
for l_ind, label_value in enumerate(
|
||||
test_loader.dataset.label_values
|
||||
):
|
||||
for l_ind, label_value in enumerate(test_loader.dataset.label_values):
|
||||
if label_value in test_loader.dataset.ignored_labels:
|
||||
frame_probs_uint8_bis = np.insert(
|
||||
frame_probs_uint8_bis, l_ind, 0, axis=1
|
||||
)
|
||||
frame_probs_uint8_bis = np.insert(frame_probs_uint8_bis, l_ind, 0, axis=1)
|
||||
|
||||
# Predicted labels
|
||||
frame_preds = test_loader.dataset.label_values[
|
||||
np.argmax(frame_probs_uint8_bis, axis=1)
|
||||
].astype(np.int32)
|
||||
frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8_bis,
|
||||
axis=1)].astype(np.int32)
|
||||
|
||||
# Save some of the frame pots
|
||||
if f_ind % 20 == 0:
|
||||
seq_path = join(
|
||||
test_loader.dataset.path,
|
||||
"sequences",
|
||||
test_loader.dataset.sequences[s_ind],
|
||||
)
|
||||
velo_file = join(
|
||||
seq_path,
|
||||
"velodyne",
|
||||
test_loader.dataset.frames[s_ind][f_ind] + ".bin",
|
||||
)
|
||||
seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind])
|
||||
velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin')
|
||||
frame_points = np.fromfile(velo_file, dtype=np.float32)
|
||||
frame_points = frame_points.reshape((-1, 4))
|
||||
predpath = join(
|
||||
test_path, pred_folder, filename[:-4] + ".ply"
|
||||
)
|
||||
# pots = test_loader.dataset.f_potentials[s_ind][f_ind]
|
||||
predpath = join(test_path, pred_folder, filename[:-4] + '.ply')
|
||||
#pots = test_loader.dataset.f_potentials[s_ind][f_ind]
|
||||
pots = np.zeros((0,))
|
||||
if pots.shape[0] > 0:
|
||||
write_ply(
|
||||
predpath,
|
||||
[
|
||||
frame_points[:, :3],
|
||||
frame_labels,
|
||||
frame_preds,
|
||||
pots,
|
||||
],
|
||||
["x", "y", "z", "gt", "pre", "pots"],
|
||||
)
|
||||
write_ply(predpath,
|
||||
[frame_points[:, :3], frame_labels, frame_preds, pots],
|
||||
['x', 'y', 'z', 'gt', 'pre', 'pots'])
|
||||
else:
|
||||
write_ply(
|
||||
predpath,
|
||||
write_ply(predpath,
|
||||
[frame_points[:, :3], frame_labels, frame_preds],
|
||||
["x", "y", "z", "gt", "pre"],
|
||||
)
|
||||
['x', 'y', 'z', 'gt', 'pre'])
|
||||
|
||||
# Also save label probabilities
|
||||
probpath = join(
|
||||
test_path, folder, filename[:-4] + "_probs.ply"
|
||||
)
|
||||
lbl_names = [
|
||||
test_loader.dataset.label_to_names[l]
|
||||
probpath = join(test_path, folder, filename[:-4] + '_probs.ply')
|
||||
lbl_names = [test_loader.dataset.label_to_names[l]
|
||||
for l in test_loader.dataset.label_values
|
||||
if l not in test_loader.dataset.ignored_labels
|
||||
]
|
||||
write_ply(
|
||||
probpath,
|
||||
if l not in test_loader.dataset.ignored_labels]
|
||||
write_ply(probpath,
|
||||
[frame_points[:, :3], frame_probs_uint8],
|
||||
["x", "y", "z"] + lbl_names,
|
||||
)
|
||||
['x', 'y', 'z'] + lbl_names)
|
||||
|
||||
# keep frame preds in memory
|
||||
all_f_preds[s_ind][f_ind] = frame_preds
|
||||
all_f_labels[s_ind][f_ind] = frame_labels
|
||||
|
||||
else:
|
||||
|
||||
# Save some of the frame preds
|
||||
if f_inds[b_i, 1] % 100 == 0:
|
||||
|
||||
# Insert false columns for ignored labels
|
||||
for l_ind, label_value in enumerate(
|
||||
test_loader.dataset.label_values
|
||||
):
|
||||
for l_ind, label_value in enumerate(test_loader.dataset.label_values):
|
||||
if label_value in test_loader.dataset.ignored_labels:
|
||||
frame_probs_uint8 = np.insert(
|
||||
frame_probs_uint8, l_ind, 0, axis=1
|
||||
)
|
||||
frame_probs_uint8 = np.insert(frame_probs_uint8, l_ind, 0, axis=1)
|
||||
|
||||
# Predicted labels
|
||||
frame_preds = test_loader.dataset.label_values[
|
||||
np.argmax(frame_probs_uint8, axis=1)
|
||||
].astype(np.int32)
|
||||
frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8,
|
||||
axis=1)].astype(np.int32)
|
||||
|
||||
# Load points
|
||||
seq_path = join(
|
||||
test_loader.dataset.path,
|
||||
"sequences",
|
||||
test_loader.dataset.sequences[s_ind],
|
||||
)
|
||||
velo_file = join(
|
||||
seq_path,
|
||||
"velodyne",
|
||||
test_loader.dataset.frames[s_ind][f_ind] + ".bin",
|
||||
)
|
||||
seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind])
|
||||
velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin')
|
||||
frame_points = np.fromfile(velo_file, dtype=np.float32)
|
||||
frame_points = frame_points.reshape((-1, 4))
|
||||
predpath = join(
|
||||
test_path, pred_folder, filename[:-4] + ".ply"
|
||||
)
|
||||
# pots = test_loader.dataset.f_potentials[s_ind][f_ind]
|
||||
predpath = join(test_path, pred_folder, filename[:-4] + '.ply')
|
||||
#pots = test_loader.dataset.f_potentials[s_ind][f_ind]
|
||||
pots = np.zeros((0,))
|
||||
if pots.shape[0] > 0:
|
||||
write_ply(
|
||||
predpath,
|
||||
write_ply(predpath,
|
||||
[frame_points[:, :3], frame_preds, pots],
|
||||
["x", "y", "z", "pre", "pots"],
|
||||
)
|
||||
['x', 'y', 'z', 'pre', 'pots'])
|
||||
else:
|
||||
write_ply(
|
||||
predpath,
|
||||
write_ply(predpath,
|
||||
[frame_points[:, :3], frame_preds],
|
||||
["x", "y", "z", "pre"],
|
||||
)
|
||||
['x', 'y', 'z', 'pre'])
|
||||
|
||||
# Stack all prediction for this epoch
|
||||
i0 += length
|
||||
|
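In the hunk above, per-frame class probabilities are stored on disk as uint8 (scaled by 255) so the .npy files stay small; they are rescaled to float32 for the smoothing update and re-quantized before saving. A minimal round trip showing the cost of that quantization, with made-up values:

import numpy as np

probs = np.array([[0.05, 0.90, 0.05],
                  [0.30, 0.30, 0.40]], dtype=np.float32)

probs_uint8 = (probs * 255).astype(np.uint8)        # one byte per value, as in the test loop
restored = probs_uint8.astype(np.float32) / 255     # restored for further accumulation

print(np.abs(restored - probs).max())               # truncation error stays below 1/255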
@ -783,45 +675,30 @@ class ModelTester:
|
|||
# Display
|
||||
if (t[-1] - last_display) > 1.0:
|
||||
last_display = t[-1]
|
||||
message = "e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%"
|
||||
min_pot = int(
|
||||
torch.floor(torch.min(test_loader.dataset.potentials))
|
||||
)
|
||||
pot_num = (
|
||||
torch.sum(test_loader.dataset.potentials > min_pot + 0.5)
|
||||
.type(torch.int32)
|
||||
.item()
|
||||
)
|
||||
current_num = (
|
||||
pot_num
|
||||
+ (i + 1 - config.validation_size) * config.val_batch_num
|
||||
)
|
||||
print(
|
||||
message.format(
|
||||
test_epoch,
|
||||
i,
|
||||
message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%'
|
||||
min_pot = int(torch.floor(torch.min(test_loader.dataset.potentials)))
|
||||
pot_num = torch.sum(test_loader.dataset.potentials > min_pot + 0.5).type(torch.int32).item()
|
||||
current_num = pot_num + (i + 1 - config.validation_size) * config.val_batch_num
|
||||
print(message.format(test_epoch, i,
|
||||
100 * i / config.validation_size,
|
||||
1000 * (mean_dt[0]),
|
||||
1000 * (mean_dt[1]),
|
||||
1000 * (mean_dt[2]),
|
||||
min_pot,
|
||||
100.0 * current_num / len(test_loader.dataset.potentials),
|
||||
)
|
||||
)
|
||||
100.0 * current_num / len(test_loader.dataset.potentials)))
|
||||
|
||||
|
||||
# Update minimum of potentials
|
||||
new_min = torch.min(test_loader.dataset.potentials)
|
||||
print(
|
||||
"Test epoch {:d}, end. Min potential = {:.1f}".format(
|
||||
test_epoch, new_min
|
||||
)
|
||||
)
|
||||
print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min))
|
||||
|
||||
if last_min + 1 < new_min:
|
||||
|
||||
# Update last_min
|
||||
last_min += 1
|
||||
|
||||
if test_loader.dataset.set == "validation" and last_min % 1 == 0:
|
||||
if test_loader.dataset.set == 'validation' and last_min % 1 == 0:
|
||||
|
||||
#####################################
|
||||
# Results on the whole validation set
|
||||
#####################################
|
||||
|
@ -829,13 +706,13 @@ class ModelTester:
|
|||
# Confusions for our subparts of validation set
|
||||
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
|
||||
for i, (preds, truth) in enumerate(zip(predictions, targets)):
|
||||
|
||||
# Confusions
|
||||
Confs[i, :, :] = fast_confusion(
|
||||
truth, preds, test_loader.dataset.label_values
|
||||
).astype(np.int32)
|
||||
Confs[i, :, :] = fast_confusion(truth, preds, test_loader.dataset.label_values).astype(np.int32)
|
||||
|
||||
|
||||
# Show vote results
|
||||
print("\nCompute confusion")
|
||||
print('\nCompute confusion')
|
||||
|
||||
val_preds = []
|
||||
val_labels = []
|
||||
|
@ -846,25 +723,21 @@ class ModelTester:
|
|||
val_preds = np.hstack(val_preds)
|
||||
val_labels = np.hstack(val_labels)
|
||||
t2 = time.time()
|
||||
C_tot = fast_confusion(
|
||||
val_labels, val_preds, test_loader.dataset.label_values
|
||||
)
|
||||
C_tot = fast_confusion(val_labels, val_preds, test_loader.dataset.label_values)
|
||||
t3 = time.time()
|
||||
print(" Stacking time : {:.1f}s".format(t2 - t1))
|
||||
print("Confusion time : {:.1f}s".format(t3 - t2))
|
||||
print(' Stacking time : {:.1f}s'.format(t2 - t1))
|
||||
print('Confusion time : {:.1f}s'.format(t3 - t2))
|
||||
|
||||
s1 = "\n"
|
||||
s1 = '\n'
|
||||
for cc in C_tot:
|
||||
for c in cc:
|
||||
s1 += "{:7.0f} ".format(c)
|
||||
s1 += "\n"
|
||||
s1 += '{:7.0f} '.format(c)
|
||||
s1 += '\n'
|
||||
if debug:
|
||||
print(s1)
|
||||
|
||||
# Remove ignored labels from confusions
|
||||
for l_ind, label_value in reversed(
|
||||
list(enumerate(test_loader.dataset.label_values))
|
||||
):
|
||||
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))):
|
||||
if label_value in test_loader.dataset.ignored_labels:
|
||||
C_tot = np.delete(C_tot, l_ind, axis=0)
|
||||
C_tot = np.delete(C_tot, l_ind, axis=1)
|
||||
|
@ -874,23 +747,21 @@ class ModelTester:
|
|||
|
||||
# Compute IoUs
|
||||
mIoU = np.mean(val_IoUs)
|
||||
s2 = "{:5.2f} | ".format(100 * mIoU)
|
||||
s2 = '{:5.2f} | '.format(100 * mIoU)
|
||||
for IoU in val_IoUs:
|
||||
s2 += "{:5.2f} ".format(100 * IoU)
|
||||
print(s2 + "\n")
|
||||
s2 += '{:5.2f} '.format(100 * IoU)
|
||||
print(s2 + '\n')
|
||||
|
||||
# Save a report
|
||||
report_file = join(
|
||||
report_path, "report_{:04d}.txt".format(int(np.floor(last_min)))
|
||||
)
|
||||
str = "Report of the confusion and metrics\n"
|
||||
str += "***********************************\n\n\n"
|
||||
str += "Confusion matrix:\n\n"
|
||||
report_file = join(report_path, 'report_{:04d}.txt'.format(int(np.floor(last_min))))
|
||||
str = 'Report of the confusion and metrics\n'
|
||||
str += '***********************************\n\n\n'
|
||||
str += 'Confusion matrix:\n\n'
|
||||
str += s1
|
||||
str += "\nIoU values:\n\n"
|
||||
str += '\nIoU values:\n\n'
|
||||
str += s2
|
||||
str += "\n\n"
|
||||
with open(report_file, "w") as f:
|
||||
str += '\n\n'
|
||||
with open(report_file, 'w') as f:
|
||||
f.write(str)
|
||||
|
||||
test_epoch += 1
|
||||
|
@ -900,3 +771,28 @@ class ModelTester:
|
|||
break
|
||||
|
||||
return
|
419
utils/trainer.py
|
@ -24,17 +24,24 @@
|
|||
|
||||
# Basic libs
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import numpy as np
|
||||
import pickle
|
||||
import os
|
||||
from os import makedirs, remove
|
||||
from os.path import exists, join
|
||||
import time
|
||||
import sys
|
||||
|
||||
# PLY reader
|
||||
from utils.ply import write_ply
|
||||
from utils.ply import read_ply, write_ply
|
||||
|
||||
# Metrics
|
||||
from utils.metrics import IoU_from_confusions, fast_confusion
|
||||
from utils.config import Config
|
||||
from sklearn.neighbors import KDTree
|
||||
|
||||
from models.blocks import KPConv
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------
|
||||
|
@ -45,6 +52,7 @@ from utils.config import Config
|
|||
|
||||
|
||||
class ModelTrainer:
|
||||
|
||||
# Initialization methods
|
||||
# ------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
|
@ -67,15 +75,14 @@ class ModelTrainer:
|
|||
self.step = 0
|
||||
|
||||
# Optimizer with specific learning rate for deformable KPConv
|
||||
deform_params = [v for k, v in net.named_parameters() if "offset" in k]
|
||||
other_params = [v for k, v in net.named_parameters() if "offset" not in k]
|
||||
deform_params = [v for k, v in net.named_parameters() if 'offset' in k]
|
||||
other_params = [v for k, v in net.named_parameters() if 'offset' not in k]
|
||||
deform_lr = config.learning_rate * config.deform_lr_factor
|
||||
self.optimizer = torch.optim.SGD(
|
||||
[{"params": other_params}, {"params": deform_params, "lr": deform_lr}],
|
||||
self.optimizer = torch.optim.SGD([{'params': other_params},
|
||||
{'params': deform_params, 'lr': deform_lr}],
|
||||
lr=config.learning_rate,
|
||||
momentum=config.momentum,
|
||||
weight_decay=config.weight_decay,
|
||||
)
|
||||
weight_decay=config.weight_decay)
|
||||
|
||||
# Choose to train on CPU or GPU
|
||||
if on_gpu and torch.cuda.is_available():
|
||||
|
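The optimizer setup in the hunk above relies on PyTorch parameter groups: a group that specifies its own 'lr' overrides the optimizer-wide default, which is how the deformable offset parameters receive a scaled learning rate. A standalone sketch with a hypothetical toy module and illustrative hyperparameters:

import torch
import torch.nn as nn

# Stand-in for a network with deformable KPConv offsets among its parameters
net = nn.ParameterDict({'weights': nn.Parameter(torch.randn(8, 8)),
                        'offset': nn.Parameter(torch.zeros(8, 3))})

base_lr, deform_lr_factor = 1e-2, 0.1
deform_params = [v for k, v in net.named_parameters() if 'offset' in k]
other_params = [v for k, v in net.named_parameters() if 'offset' not in k]

optimizer = torch.optim.SGD([{'params': other_params},
                             {'params': deform_params, 'lr': base_lr * deform_lr_factor}],
                            lr=base_lr, momentum=0.98, weight_decay=1e-3)

for group in optimizer.param_groups:
    print(group['lr'])   # 0.01 for regular weights, 0.001 for the offsets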
@ -88,26 +95,24 @@ class ModelTrainer:
|
|||
# Load previous checkpoint
|
||||
##########################
|
||||
|
||||
if chkp_path is not None:
|
||||
if (chkp_path is not None):
|
||||
if finetune:
|
||||
checkpoint = torch.load(chkp_path)
|
||||
net.load_state_dict(checkpoint["model_state_dict"])
|
||||
net.load_state_dict(checkpoint['model_state_dict'])
|
||||
net.train()
|
||||
print("Model restored and ready for finetuning.")
|
||||
else:
|
||||
checkpoint = torch.load(chkp_path)
|
||||
net.load_state_dict(checkpoint["model_state_dict"])
|
||||
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
|
||||
self.epoch = checkpoint["epoch"]
|
||||
net.load_state_dict(checkpoint['model_state_dict'])
|
||||
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
|
||||
self.epoch = checkpoint['epoch']
|
||||
net.train()
|
||||
print("Model and training state restored.")
|
||||
|
||||
# Path of the result folder
|
||||
if config.saving:
|
||||
if config.saving_path is None:
|
||||
config.saving_path = time.strftime(
|
||||
"results/Log_%Y-%m-%d_%H-%M-%S", time.gmtime()
|
||||
)
|
||||
config.saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
|
||||
if not exists(config.saving_path):
|
||||
makedirs(config.saving_path)
|
||||
config.save()
|
||||
|
@ -128,17 +133,17 @@ class ModelTrainer:
|
|||
|
||||
if config.saving:
|
||||
# Training log file
|
||||
with open(join(config.saving_path, "training.txt"), "w") as file:
|
||||
file.write("epochs steps out_loss offset_loss train_accuracy time\n")
|
||||
with open(join(config.saving_path, 'training.txt'), "w") as file:
|
||||
file.write('epochs steps out_loss offset_loss train_accuracy time\n')
|
||||
|
||||
# Killing file (simply delete this file when you want to stop the training)
|
||||
PID_file = join(config.saving_path, "running_PID.txt")
|
||||
PID_file = join(config.saving_path, 'running_PID.txt')
|
||||
if not exists(PID_file):
|
||||
with open(PID_file, "w") as file:
|
||||
file.write("Launched with PyCharm")
|
||||
file.write('Launched with PyCharm')
|
||||
|
||||
# Checkpoints directory
|
||||
checkpoint_directory = join(config.saving_path, "checkpoints")
|
||||
checkpoint_directory = join(config.saving_path, 'checkpoints')
|
||||
if not exists(checkpoint_directory):
|
||||
makedirs(checkpoint_directory)
|
||||
else:
|
||||
|
@ -153,12 +158,14 @@ class ModelTrainer:
|
|||
|
||||
# Start training loop
|
||||
for epoch in range(config.max_epoch):
|
||||
|
||||
# Remove File for kill signal
|
||||
if epoch == config.max_epoch - 1 and exists(PID_file):
|
||||
remove(PID_file)
|
||||
|
||||
self.step = 0
|
||||
for batch in training_loader:
|
||||
|
||||
# Check kill signal (running_PID.txt deleted)
|
||||
if config.saving and not exists(PID_file):
|
||||
continue
|
||||
|
@ -171,7 +178,7 @@ class ModelTrainer:
|
|||
t = t[-1:]
|
||||
t += [time.time()]
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
batch.to(self.device)
|
||||
|
||||
# zero the parameter gradients
|
||||
|
@ -188,12 +195,11 @@ class ModelTrainer:
|
|||
loss.backward()
|
||||
|
||||
if config.grad_clip_norm > 0:
|
||||
# torch.nn.utils.clip_grad_norm_(net.parameters(), config.grad_clip_norm)
|
||||
torch.nn.utils.clip_grad_value_(
|
||||
net.parameters(), config.grad_clip_norm
|
||||
)
|
||||
#torch.nn.utils.clip_grad_norm_(net.parameters(), config.grad_clip_norm)
|
||||
torch.nn.utils.clip_grad_value_(net.parameters(), config.grad_clip_norm)
|
||||
self.optimizer.step()
|
||||
|
||||
|
||||
torch.cuda.empty_cache()
|
||||
torch.cuda.synchronize(self.device)
|
||||
|
||||
|
@ -208,33 +214,25 @@ class ModelTrainer:
|
|||
# Console display (only one per second)
|
||||
if (t[-1] - last_display) > 1.0:
|
||||
last_display = t[-1]
|
||||
message = "e{:03d}-i{:04d} => L={:.3f} acc={:3.0f}% / t(ms): {:5.1f} {:5.1f} {:5.1f})"
|
||||
print(
|
||||
message.format(
|
||||
self.epoch,
|
||||
self.step,
|
||||
message = 'e{:03d}-i{:04d} => L={:.3f} acc={:3.0f}% / t(ms): {:5.1f} {:5.1f} {:5.1f})'
|
||||
print(message.format(self.epoch, self.step,
|
||||
loss.item(),
|
||||
100 * acc,
|
||||
100*acc,
|
||||
1000 * mean_dt[0],
|
||||
1000 * mean_dt[1],
|
||||
1000 * mean_dt[2],
|
||||
)
|
||||
)
|
||||
1000 * mean_dt[2]))
|
||||
|
||||
# Log file
|
||||
if config.saving:
|
||||
with open(join(config.saving_path, "training.txt"), "a") as file:
|
||||
message = "{:d} {:d} {:.3f} {:.3f} {:.3f} {:.3f}\n"
|
||||
file.write(
|
||||
message.format(
|
||||
self.epoch,
|
||||
with open(join(config.saving_path, 'training.txt'), "a") as file:
|
||||
message = '{:d} {:d} {:.3f} {:.3f} {:.3f} {:.3f}\n'
|
||||
file.write(message.format(self.epoch,
|
||||
self.step,
|
||||
net.output_loss,
|
||||
net.reg_loss,
|
||||
acc,
|
||||
t[-1] - t0,
|
||||
)
|
||||
)
|
||||
t[-1] - t0))
|
||||
|
||||
|
||||
self.step += 1
|
||||
|
||||
|
@ -249,7 +247,7 @@ class ModelTrainer:
|
|||
# Update learning rate
|
||||
if self.epoch in config.lr_decays:
|
||||
for param_group in self.optimizer.param_groups:
|
||||
param_group["lr"] *= config.lr_decays[self.epoch]
|
||||
param_group['lr'] *= config.lr_decays[self.epoch]
|
||||
|
||||
# Update epoch
|
||||
self.epoch += 1
|
||||
|
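config.lr_decays is used here as a mapping from epoch index to a multiplicative factor, and scaling each param_group['lr'] in place is the manual equivalent of a torch LR scheduler. A sketch with an assumed schedule (the real values live in the experiment configs, not in this diff):

# Hypothetical schedule: one decade of decay spread over 100 epochs
lr_decays = {i: 0.1 ** (1 / 100) for i in range(1, 1000)}

lr = 1e-2
for epoch in range(200):
    if epoch in lr_decays:
        lr *= lr_decays[epoch]   # same in-place scaling applied to each param_group above

print(lr)   # ~1e-4 after 200 epochs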
@ -257,22 +255,18 @@ class ModelTrainer:
|
|||
# Saving
|
||||
if config.saving:
|
||||
# Get current state dict
|
||||
save_dict = {
|
||||
"epoch": self.epoch,
|
||||
"model_state_dict": net.state_dict(),
|
||||
"optimizer_state_dict": self.optimizer.state_dict(),
|
||||
"saving_path": config.saving_path,
|
||||
}
|
||||
save_dict = {'epoch': self.epoch,
|
||||
'model_state_dict': net.state_dict(),
|
||||
'optimizer_state_dict': self.optimizer.state_dict(),
|
||||
'saving_path': config.saving_path}
|
||||
|
||||
# Save current state of the network (for restoring purposes)
|
||||
checkpoint_path = join(checkpoint_directory, "current_chkp.tar")
|
||||
checkpoint_path = join(checkpoint_directory, 'current_chkp.tar')
|
||||
torch.save(save_dict, checkpoint_path)
|
||||
|
||||
# Save checkpoints occasionally
|
||||
if (self.epoch + 1) % config.checkpoint_gap == 0:
|
||||
checkpoint_path = join(
|
||||
checkpoint_directory, "chkp_{:04d}.tar".format(self.epoch + 1)
|
||||
)
|
||||
checkpoint_path = join(checkpoint_directory, 'chkp_{:04d}.tar'.format(self.epoch + 1))
|
||||
torch.save(save_dict, checkpoint_path)
|
||||
|
||||
# Validation
|
||||
|
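The checkpoint written above is the same dictionary that ModelTester.__init__ and the finetuning branch load back. A minimal round trip with a toy module, using only keys that appear in this diff:

import torch
import torch.nn as nn

net = nn.Linear(4, 2)                                    # stand-in for the KPConv network
optimizer = torch.optim.SGD(net.parameters(), lr=1e-2)

save_dict = {'epoch': 7,
             'model_state_dict': net.state_dict(),
             'optimizer_state_dict': optimizer.state_dict(),
             'saving_path': 'results/Log_example'}       # path is illustrative
torch.save(save_dict, 'current_chkp.tar')

checkpoint = torch.load('current_chkp.tar')              # restore, as in ModelTester / finetuning
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']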
@ -280,23 +274,24 @@ class ModelTrainer:
|
|||
self.validation(net, val_loader, config)
|
||||
net.train()
|
||||
|
||||
print("Finished Training")
|
||||
print('Finished Training')
|
||||
return
|
||||
|
||||
# Validation methods
|
||||
# ------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
def validation(self, net, val_loader, config: Config):
|
||||
if config.dataset_task == "classification":
|
||||
|
||||
if config.dataset_task == 'classification':
|
||||
self.object_classification_validation(net, val_loader, config)
|
||||
elif config.dataset_task == "segmentation":
|
||||
elif config.dataset_task == 'segmentation':
|
||||
self.object_segmentation_validation(net, val_loader, config)
|
||||
elif config.dataset_task == "cloud_segmentation":
|
||||
elif config.dataset_task == 'cloud_segmentation':
|
||||
self.cloud_segmentation_validation(net, val_loader, config)
|
||||
elif config.dataset_task == "slam_segmentation":
|
||||
elif config.dataset_task == 'slam_segmentation':
|
||||
self.slam_segmentation_validation(net, val_loader, config)
|
||||
else:
|
||||
raise ValueError("No validation method implemented for this network type")
|
||||
raise ValueError('No validation method implemented for this network type')
|
||||
|
||||
def object_classification_validation(self, net, val_loader, config):
|
||||
"""
|
||||
|
@ -318,7 +313,7 @@ class ModelTrainer:
|
|||
softmax = torch.nn.Softmax(1)
|
||||
|
||||
# Initialize global prediction over all models
|
||||
if not hasattr(self, "val_probs"):
|
||||
if not hasattr(self, 'val_probs'):
|
||||
self.val_probs = np.zeros((val_loader.dataset.num_models, nc_model))
|
||||
|
||||
#####################
|
||||
|
@ -335,11 +330,12 @@ class ModelTrainer:
|
|||
|
||||
# Start validation loop
|
||||
for batch in val_loader:
|
||||
|
||||
# New time
|
||||
t = t[-1:]
|
||||
t += [time.time()]
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
batch.to(self.device)
|
||||
|
||||
# Forward pass
|
||||
|
@ -358,14 +354,10 @@ class ModelTrainer:
|
|||
# Display
|
||||
if (t[-1] - last_display) > 1.0:
|
||||
last_display = t[-1]
|
||||
message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})"
|
||||
print(
|
||||
message.format(
|
||||
100 * len(obj_inds) / config.validation_size,
|
||||
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})'
|
||||
print(message.format(100 * len(obj_inds) / config.validation_size,
|
||||
1000 * (mean_dt[0]),
|
||||
1000 * (mean_dt[1]),
|
||||
)
|
||||
)
|
||||
1000 * (mean_dt[1])))
|
||||
|
||||
# Stack all validation predictions
|
||||
probs = np.vstack(probs)
|
||||
|
@ -376,9 +368,7 @@ class ModelTrainer:
|
|||
# Voting validation
|
||||
###################
|
||||
|
||||
self.val_probs[obj_inds] = (
|
||||
val_smooth * self.val_probs[obj_inds] + (1 - val_smooth) * probs
|
||||
)
|
||||
self.val_probs[obj_inds] = val_smooth * self.val_probs[obj_inds] + (1-val_smooth) * probs
|
||||
|
||||
############
|
||||
# Confusions
|
||||
|
@ -387,38 +377,39 @@ class ModelTrainer:
|
|||
validation_labels = np.array(val_loader.dataset.label_values)
|
||||
|
||||
# Compute classification results
|
||||
C1 = fast_confusion(targets, np.argmax(probs, axis=1), validation_labels)
|
||||
C1 = fast_confusion(targets,
|
||||
np.argmax(probs, axis=1),
|
||||
validation_labels)
|
||||
|
||||
# Compute votes confusion
|
||||
C2 = fast_confusion(
|
||||
val_loader.dataset.input_labels,
|
||||
C2 = fast_confusion(val_loader.dataset.input_labels,
|
||||
np.argmax(self.val_probs, axis=1),
|
||||
validation_labels,
|
||||
)
|
||||
validation_labels)
|
||||
|
||||
|
||||
# Saving (optional)
|
||||
if config.saving:
|
||||
print("Save confusions")
|
||||
conf_list = [C1, C2]
|
||||
file_list = ["val_confs.txt", "vote_confs.txt"]
|
||||
file_list = ['val_confs.txt', 'vote_confs.txt']
|
||||
for conf, conf_file in zip(conf_list, file_list):
|
||||
test_file = join(config.saving_path, conf_file)
|
||||
if exists(test_file):
|
||||
with open(test_file, "a") as text_file:
|
||||
for line in conf:
|
||||
for value in line:
|
||||
text_file.write("%d " % value)
|
||||
text_file.write("\n")
|
||||
text_file.write('%d ' % value)
|
||||
text_file.write('\n')
|
||||
else:
|
||||
with open(test_file, "w") as text_file:
|
||||
for line in conf:
|
||||
for value in line:
|
||||
text_file.write("%d " % value)
|
||||
text_file.write("\n")
|
||||
text_file.write('%d ' % value)
|
||||
text_file.write('\n')
|
||||
|
||||
val_ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)
|
||||
vote_ACC = 100 * np.sum(np.diag(C2)) / (np.sum(C2) + 1e-6)
|
||||
print("Accuracies : val = {:.1f}% / vote = {:.1f}%".format(val_ACC, vote_ACC))
|
||||
print('Accuracies : val = {:.1f}% / vote = {:.1f}%'.format(val_ACC, vote_ACC))
|
||||
|
||||
return C1
|
||||
|
||||
|
@ -447,25 +438,19 @@ class ModelTrainer:
|
|||
# Number of classes predicted by the model
|
||||
nc_model = config.num_classes
|
||||
|
||||
# print(nc_tot)
|
||||
# print(nc_model)
|
||||
#print(nc_tot)
|
||||
#print(nc_model)
|
||||
|
||||
# Initiate global prediction over validation clouds
|
||||
if not hasattr(self, "validation_probs"):
|
||||
self.validation_probs = [
|
||||
np.zeros((l.shape[0], nc_model))
|
||||
for l in val_loader.dataset.input_labels
|
||||
]
|
||||
if not hasattr(self, 'validation_probs'):
|
||||
self.validation_probs = [np.zeros((l.shape[0], nc_model))
|
||||
for l in val_loader.dataset.input_labels]
|
||||
self.val_proportions = np.zeros(nc_model, dtype=np.float32)
|
||||
i = 0
|
||||
for label_value in val_loader.dataset.label_values:
|
||||
if label_value not in val_loader.dataset.ignored_labels:
|
||||
self.val_proportions[i] = np.sum(
|
||||
[
|
||||
np.sum(labels == label_value)
|
||||
for labels in val_loader.dataset.validation_labels
|
||||
]
|
||||
)
|
||||
self.val_proportions[i] = np.sum([np.sum(labels == label_value)
|
||||
for labels in val_loader.dataset.validation_labels])
|
||||
i += 1
|
||||
|
||||
#####################
|
||||
|
@ -479,15 +464,17 @@ class ModelTrainer:
|
|||
last_display = time.time()
|
||||
mean_dt = np.zeros(1)
|
||||
|
||||
|
||||
t1 = time.time()
|
||||
|
||||
# Start validation loop
|
||||
for i, batch in enumerate(val_loader):
|
||||
|
||||
# New time
|
||||
t = t[-1:]
|
||||
t += [time.time()]
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
batch.to(self.device)
|
||||
|
||||
# Forward pass
|
||||
|
@ -506,17 +493,16 @@ class ModelTrainer:
|
|||
|
||||
i0 = 0
|
||||
for b_i, length in enumerate(lengths):
|
||||
|
||||
# Get prediction
|
||||
target = labels[i0 : i0 + length]
|
||||
probs = stacked_probs[i0 : i0 + length]
|
||||
inds = in_inds[i0 : i0 + length]
|
||||
target = labels[i0:i0 + length]
|
||||
probs = stacked_probs[i0:i0 + length]
|
||||
inds = in_inds[i0:i0 + length]
|
||||
c_i = cloud_inds[b_i]
|
||||
|
||||
# Update current probs in whole cloud
|
||||
self.validation_probs[c_i][inds] = (
|
||||
val_smooth * self.validation_probs[c_i][inds]
|
||||
self.validation_probs[c_i][inds] = val_smooth * self.validation_probs[c_i][inds] \
|
||||
+ (1 - val_smooth) * probs
|
||||
)
|
||||
|
||||
# Stack all prediction for this epoch
|
||||
predictions.append(probs)
|
||||
|
@ -530,20 +516,17 @@ class ModelTrainer:
|
|||
# Display
|
||||
if (t[-1] - last_display) > 1.0:
|
||||
last_display = t[-1]
|
||||
message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})"
|
||||
print(
|
||||
message.format(
|
||||
100 * i / config.validation_size,
|
||||
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})'
|
||||
print(message.format(100 * i / config.validation_size,
|
||||
1000 * (mean_dt[0]),
|
||||
1000 * (mean_dt[1]),
|
||||
)
|
||||
)
|
||||
1000 * (mean_dt[1])))
|
||||
|
||||
t2 = time.time()
|
||||
|
||||
# Confusions for our subparts of validation set
|
||||
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
|
||||
for i, (probs, truth) in enumerate(zip(predictions, targets)):
|
||||
|
||||
# Insert false columns for ignored labels
|
||||
for l_ind, label_value in enumerate(val_loader.dataset.label_values):
|
||||
if label_value in val_loader.dataset.ignored_labels:
|
||||
|
@ -553,9 +536,8 @@ class ModelTrainer:
|
|||
preds = val_loader.dataset.label_values[np.argmax(probs, axis=1)]
|
||||
|
||||
# Confusions
|
||||
Confs[i, :, :] = fast_confusion(
|
||||
truth, preds, val_loader.dataset.label_values
|
||||
).astype(np.int32)
|
||||
Confs[i, :, :] = fast_confusion(truth, preds, val_loader.dataset.label_values).astype(np.int32)
|
||||
|
||||
|
||||
t3 = time.time()
|
||||
|
||||
|
@ -563,9 +545,7 @@ class ModelTrainer:
|
|||
C = np.sum(Confs, axis=0).astype(np.float32)
|
||||
|
||||
# Remove ignored labels from confusions
|
||||
for l_ind, label_value in reversed(
|
||||
list(enumerate(val_loader.dataset.label_values))
|
||||
):
|
||||
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))):
|
||||
if label_value in val_loader.dataset.ignored_labels:
|
||||
C = np.delete(C, l_ind, axis=0)
|
||||
C = np.delete(C, l_ind, axis=1)
|
||||
|
@ -573,6 +553,7 @@ class ModelTrainer:
|
|||
# Balance with real validation proportions
|
||||
C *= np.expand_dims(self.val_proportions / (np.sum(C, axis=1) + 1e-6), 1)
|
||||
|
||||
|
||||
t4 = time.time()
|
||||
|
||||
# Objects IoU
|
||||
|
@ -582,14 +563,15 @@ class ModelTrainer:
|
|||
|
||||
# Saving (optional)
|
||||
if config.saving:
|
||||
|
||||
# Name of saving file
|
||||
test_file = join(config.saving_path, "val_IoUs.txt")
|
||||
test_file = join(config.saving_path, 'val_IoUs.txt')
|
||||
|
||||
# Line to write:
|
||||
line = ""
|
||||
line = ''
|
||||
for IoU in IoUs:
|
||||
line += "{:.3f} ".format(IoU)
|
||||
line = line + "\n"
|
||||
line += '{:.3f} '.format(IoU)
|
||||
line = line + '\n'
|
||||
|
||||
# Write in file
|
||||
if exists(test_file):
|
||||
|
@ -601,36 +583,33 @@ class ModelTrainer:
|
|||
|
||||
# Save potentials
|
||||
if val_loader.dataset.use_potentials:
|
||||
pot_path = join(config.saving_path, "potentials")
|
||||
pot_path = join(config.saving_path, 'potentials')
|
||||
if not exists(pot_path):
|
||||
makedirs(pot_path)
|
||||
files = val_loader.dataset.files
|
||||
for i, file_path in enumerate(files):
|
||||
pot_points = np.array(
|
||||
val_loader.dataset.pot_trees[i].data, copy=False
|
||||
)
|
||||
cloud_name = file_path.split("/")[-1]
|
||||
pot_points = np.array(val_loader.dataset.pot_trees[i].data, copy=False)
|
||||
cloud_name = file_path.split('/')[-1]
|
||||
pot_name = join(pot_path, cloud_name)
|
||||
pots = val_loader.dataset.potentials[i].numpy().astype(np.float32)
|
||||
write_ply(
|
||||
pot_name,
|
||||
write_ply(pot_name,
|
||||
[pot_points.astype(np.float32), pots],
|
||||
["x", "y", "z", "pots"],
|
||||
)
|
||||
['x', 'y', 'z', 'pots'])
|
||||
|
||||
t6 = time.time()
|
||||
|
||||
# Print instance mean
|
||||
mIoU = 100 * np.mean(IoUs)
|
||||
print("{:s} mean IoU = {:.1f}%".format(config.dataset, mIoU))
|
||||
print('{:s} mean IoU = {:.1f}%'.format(config.dataset, mIoU))
|
||||
|
||||
# Save predicted cloud occasionally
|
||||
if config.saving and (self.epoch + 1) % config.checkpoint_gap == 0:
|
||||
val_path = join(config.saving_path, "val_preds_{:d}".format(self.epoch + 1))
|
||||
val_path = join(config.saving_path, 'val_preds_{:d}'.format(self.epoch + 1))
|
||||
if not exists(val_path):
|
||||
makedirs(val_path)
|
||||
files = val_loader.dataset.files
|
||||
for i, file_path in enumerate(files):
|
||||
|
||||
# Get points
|
||||
points = val_loader.dataset.load_evaluation_points(file_path)
|
||||
|
||||
|
@ -643,36 +622,34 @@ class ModelTrainer:
|
|||
sub_probs = np.insert(sub_probs, l_ind, 0, axis=1)
|
||||
|
||||
# Get the predicted labels
|
||||
sub_preds = val_loader.dataset.label_values[
|
||||
np.argmax(sub_probs, axis=1).astype(np.int32)
|
||||
]
|
||||
sub_preds = val_loader.dataset.label_values[np.argmax(sub_probs, axis=1).astype(np.int32)]
|
||||
|
||||
# Reproject preds on the evaluations points
|
||||
preds = (sub_preds[val_loader.dataset.test_proj[i]]).astype(np.int32)
|
||||
|
||||
# Path of saved validation file
|
||||
cloud_name = file_path.split("/")[-1]
|
||||
cloud_name = file_path.split('/')[-1]
|
||||
val_name = join(val_path, cloud_name)
|
||||
|
||||
# Save file
|
||||
labels = val_loader.dataset.validation_labels[i].astype(np.int32)
|
||||
write_ply(
|
||||
val_name, [points, preds, labels], ["x", "y", "z", "preds", "class"]
|
||||
)
|
||||
write_ply(val_name,
|
||||
[points, preds, labels],
|
||||
['x', 'y', 'z', 'preds', 'class'])
|
||||
|
||||
# Display timings
|
||||
t7 = time.time()
|
||||
if debug:
|
||||
print("\n************************\n")
|
||||
print("Validation timings:")
|
||||
print("Init ...... {:.1f}s".format(t1 - t0))
|
||||
print("Loop ...... {:.1f}s".format(t2 - t1))
|
||||
print("Confs ..... {:.1f}s".format(t3 - t2))
|
||||
print("Confs bis . {:.1f}s".format(t4 - t3))
|
||||
print("IoU ....... {:.1f}s".format(t5 - t4))
|
||||
print("Save1 ..... {:.1f}s".format(t6 - t5))
|
||||
print("Save2 ..... {:.1f}s".format(t7 - t6))
|
||||
print("\n************************\n")
|
||||
print('\n************************\n')
|
||||
print('Validation timings:')
|
||||
print('Init ...... {:.1f}s'.format(t1 - t0))
|
||||
print('Loop ...... {:.1f}s'.format(t2 - t1))
|
||||
print('Confs ..... {:.1f}s'.format(t3 - t2))
|
||||
print('Confs bis . {:.1f}s'.format(t4 - t3))
|
||||
print('IoU ....... {:.1f}s'.format(t5 - t4))
|
||||
print('Save1 ..... {:.1f}s'.format(t6 - t5))
|
||||
print('Save2 ..... {:.1f}s'.format(t7 - t6))
|
||||
print('\n************************\n')
|
||||
|
||||
return
|
||||
|
||||
|
@ -692,11 +669,12 @@ class ModelTrainer:
|
|||
return
|
||||
|
||||
# Choose validation smoothing parameter (0 for no smoothing, 0.99 for strong smoothing)
|
||||
val_smooth = 0.95
|
||||
softmax = torch.nn.Softmax(1)
|
||||
|
||||
# Create folder for validation predictions
|
||||
if not exists(join(config.saving_path, "val_preds")):
|
||||
makedirs(join(config.saving_path, "val_preds"))
|
||||
if not exists (join(config.saving_path, 'val_preds')):
|
||||
makedirs(join(config.saving_path, 'val_preds'))
|
||||
|
||||
# initiate the dataset validation containers
|
||||
val_loader.dataset.val_points = []
|
||||
|
@ -718,15 +696,17 @@ class ModelTrainer:
|
|||
last_display = time.time()
|
||||
mean_dt = np.zeros(1)
|
||||
|
||||
|
||||
t1 = time.time()
|
||||
|
||||
# Start validation loop
|
||||
for i, batch in enumerate(val_loader):
|
||||
|
||||
# New time
|
||||
t = t[-1:]
|
||||
t += [time.time()]
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
batch.to(self.device)
|
||||
|
||||
# Forward pass
|
||||
|
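Note: `val_smooth` above acts as an exponential moving-average factor on the per-point class probabilities accumulated across validation passes. A small sketch of that update rule, with `val_probs` and `new_probs` as hypothetical arrays of shape (n_points, n_classes):

    import numpy as np

    def smooth_probs(val_probs, new_probs, val_smooth=0.95):
        # Running average: keep most of the old estimate, blend in the new one.
        return val_smooth * val_probs + (1 - val_smooth) * new_probs

    old = np.array([[0.2, 0.8]])
    new = np.array([[0.6, 0.4]])
    print(smooth_probs(old, new))  # roughly [[0.22 0.78]]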
@@ -746,8 +726,9 @@ class ModelTrainer:

i0 = 0
for b_i, length in enumerate(lengths):

# Get prediction
probs = stk_probs[i0 : i0 + length]
probs = stk_probs[i0:i0 + length]
proj_inds = r_inds_list[b_i]
proj_mask = r_mask_list[b_i]
frame_labels = labels_list[b_i]

@@ -770,10 +751,8 @@ class ModelTrainer:
preds = val_loader.dataset.label_values[np.argmax(proj_probs, axis=1)]

# Save predictions in a binary file
filename = "{:s}_{:07d}.npy".format(
val_loader.dataset.sequences[s_ind], f_ind
)
filepath = join(config.saving_path, "val_preds", filename)
filename = '{:s}_{:07d}.npy'.format(val_loader.dataset.sequences[s_ind], f_ind)
filepath = join(config.saving_path, 'val_preds', filename)
if exists(filepath):
frame_preds = np.load(filepath)
else:
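Note: the `i0:i0 + length` loop above slices the stacked network output back into per-element chunks using the batch's `lengths` list. A short NumPy equivalent, where `stk_probs` and `lengths` are hypothetical placeholders:

    import numpy as np

    stk_probs = np.random.rand(10, 3)   # stacked probabilities of one batch
    lengths = [4, 6]                    # number of points of each batch element

    # Same effect as the i0:i0+length loop: split at the cumulative lengths
    chunks = np.split(stk_probs, np.cumsum(lengths)[:-1], axis=0)
    assert [c.shape[0] for c in chunks] == lengths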
@@ -783,30 +762,18 @@ class ModelTrainer:

# Save some of the frame pots
if f_ind % 20 == 0:
seq_path = join(
val_loader.dataset.path,
"sequences",
val_loader.dataset.sequences[s_ind],
)
velo_file = join(
seq_path,
"velodyne",
val_loader.dataset.frames[s_ind][f_ind] + ".bin",
)
seq_path = join(val_loader.dataset.path, 'sequences', val_loader.dataset.sequences[s_ind])
velo_file = join(seq_path, 'velodyne', val_loader.dataset.frames[s_ind][f_ind] + '.bin')
frame_points = np.fromfile(velo_file, dtype=np.float32)
frame_points = frame_points.reshape((-1, 4))
write_ply(
filepath[:-4] + "_pots.ply",
write_ply(filepath[:-4] + '_pots.ply',
[frame_points[:, :3], frame_labels, frame_preds],
["x", "y", "z", "gt", "pre"],
)
['x', 'y', 'z', 'gt', 'pre'])

# Update validation confusions
frame_C = fast_confusion(
frame_labels,
frame_C = fast_confusion(frame_labels,
frame_preds.astype(np.int32),
val_loader.dataset.label_values,
)
val_loader.dataset.label_values)
val_loader.dataset.val_confs[s_ind][f_ind, :, :] = frame_C

# Stack all prediction for this epoch
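Note: `fast_confusion` above accumulates a per-frame confusion matrix between ground-truth and predicted labels. A plain NumPy stand-in using `np.bincount`, assuming contiguous label values starting at 0 (the repository's version also handles arbitrary label values, which is not reproduced here):

    import numpy as np

    def confusion_matrix(true, pred, num_classes):
        # Row = ground-truth class, column = predicted class.
        idx = true.astype(np.int64) * num_classes + pred.astype(np.int64)
        return np.bincount(idx, minlength=num_classes ** 2).reshape(num_classes, num_classes)

    true = np.array([0, 0, 1, 2])
    pred = np.array([0, 1, 1, 2])
    print(confusion_matrix(true, pred, 3))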
@@ -823,24 +790,19 @@ class ModelTrainer:
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = "Validation : {:.1f}% (timings : {:4.2f} {:4.2f})"
print(
message.format(
100 * i / config.validation_size,
message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
)
)
1000 * (mean_dt[1])))

t2 = time.time()

# Confusions for our subparts of validation set
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
for i, (preds, truth) in enumerate(zip(predictions, targets)):

# Confusions
Confs[i, :, :] = fast_confusion(
truth, preds, val_loader.dataset.label_values
).astype(np.int32)
Confs[i, :, :] = fast_confusion(truth, preds, val_loader.dataset.label_values).astype(np.int32)

t3 = time.time()
@@ -852,14 +814,10 @@ class ModelTrainer:
C = np.sum(Confs, axis=0).astype(np.float32)

# Balance with real validation proportions
C *= np.expand_dims(
val_loader.dataset.class_proportions / (np.sum(C, axis=1) + 1e-6), 1
)
C *= np.expand_dims(val_loader.dataset.class_proportions / (np.sum(C, axis=1) + 1e-6), 1)

# Remove ignored labels from confusions
for l_ind, label_value in reversed(
list(enumerate(val_loader.dataset.label_values))
):
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))):
if label_value in val_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
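Note: once the confusion matrix `C` is balanced and stripped of ignored labels, per-class IoUs follow the usual formula IoU_c = TP_c / (TP_c + FP_c + FN_c). A sketch of that standard computation (not copied from the repository's own IoU helper):

    import numpy as np

    def iou_from_confusion(C):
        # C[i, j] = number of points of true class i predicted as class j
        TP = np.diag(C).astype(np.float64)
        FP = np.sum(C, axis=0) - TP
        FN = np.sum(C, axis=1) - TP
        return TP / (TP + FP + FN + 1e-6)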
@@ -874,25 +832,19 @@ class ModelTrainer:
t4 = time.time()

# Sum all validation confusions
C_tot = [
np.sum(seq_C, axis=0)
for seq_C in val_loader.dataset.val_confs
if len(seq_C) > 0
]
C_tot = [np.sum(seq_C, axis=0) for seq_C in val_loader.dataset.val_confs if len(seq_C) > 0]
C_tot = np.sum(np.stack(C_tot, axis=0), axis=0)

if debug:
s = "\n"
s = '\n'
for cc in C_tot:
for c in cc:
s += "{:8.1f} ".format(c)
s += "\n"
s += '{:8.1f} '.format(c)
s += '\n'
print(s)

# Remove ignored labels from confusions
for l_ind, label_value in reversed(
list(enumerate(val_loader.dataset.label_values))
):
for l_ind, label_value in reversed(list(enumerate(val_loader.dataset.label_values))):
if label_value in val_loader.dataset.ignored_labels:
C_tot = np.delete(C_tot, l_ind, axis=0)
C_tot = np.delete(C_tot, l_ind, axis=1)
@@ -904,17 +856,19 @@ class ModelTrainer:

# Saving (optionnal)
if config.saving:

IoU_list = [IoUs, val_IoUs]
file_list = ["subpart_IoUs.txt", "val_IoUs.txt"]
file_list = ['subpart_IoUs.txt', 'val_IoUs.txt']
for IoUs_to_save, IoU_file in zip(IoU_list, file_list):

# Name of saving file
test_file = join(config.saving_path, IoU_file)

# Line to write:
line = ""
line = ''
for IoU in IoUs_to_save:
line += "{:.3f} ".format(IoU)
line = line + "\n"
line += '{:.3f} '.format(IoU)
line = line + '\n'

# Write in file
if exists(test_file):
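Note: each epoch appends one space-separated line of IoUs to `subpart_IoUs.txt` / `val_IoUs.txt`. Such a file can later be parsed for plotting, for example with the hypothetical helper below (not part of the diff):

    import numpy as np

    def load_iou_history(path):
        # One row per epoch, one column per class.
        with open(path) as f:
            rows = [[float(v) for v in line.split()] for line in f if line.strip()]
        return np.array(rows)

    # mious = load_iou_history('val_IoUs.txt').mean(axis=1)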
@@ -926,22 +880,57 @@ class ModelTrainer:

# Print instance mean
mIoU = 100 * np.mean(IoUs)
print("{:s} : subpart mIoU = {:.1f} %".format(config.dataset, mIoU))
print('{:s} : subpart mIoU = {:.1f} %'.format(config.dataset, mIoU))
mIoU = 100 * np.mean(val_IoUs)
print("{:s} : val mIoU = {:.1f} %".format(config.dataset, mIoU))
print('{:s} : val mIoU = {:.1f} %'.format(config.dataset, mIoU))

t6 = time.time()

# Display timings
if debug:
print("\n************************\n")
print("Validation timings:")
print("Init ...... {:.1f}s".format(t1 - t0))
print("Loop ...... {:.1f}s".format(t2 - t1))
print("Confs ..... {:.1f}s".format(t3 - t2))
print("IoU1 ...... {:.1f}s".format(t4 - t3))
print("IoU2 ...... {:.1f}s".format(t5 - t4))
print("Save ...... {:.1f}s".format(t6 - t5))
print("\n************************\n")
print('\n************************\n')
print('Validation timings:')
print('Init ...... {:.1f}s'.format(t1 - t0))
print('Loop ...... {:.1f}s'.format(t2 - t1))
print('Confs ..... {:.1f}s'.format(t3 - t2))
print('IoU1 ...... {:.1f}s'.format(t4 - t3))
print('IoU2 ...... {:.1f}s'.format(t5 - t4))
print('Save ...... {:.1f}s'.format(t6 - t5))
print('\n************************\n')

return
@@ -26,18 +26,19 @@
import torch
import numpy as np
from sklearn.neighbors import KDTree
from os import listdir
from os.path import join
from os import makedirs, remove, rename, listdir
from os.path import exists, join
import time
from mayavi import mlab
import sys

from models.blocks import KPConv

# PLY reader
from utils.ply import write_ply
from utils.ply import write_ply, read_ply

# Configuration class
from utils.config import bcolors
from utils.config import Config, bcolors


# ----------------------------------------------------------------------------------------------------------------------

@@ -48,6 +49,7 @@ from utils.config import bcolors


class ModelVisualizer:

# Initialization methods
# ------------------------------------------------------------------------------------------------------------------
@@ -79,13 +81,13 @@ class ModelVisualizer:
checkpoint = torch.load(chkp_path)

new_dict = {}
for k, v in checkpoint["model_state_dict"].items():
if "blocs" in k:
k = k.replace("blocs", "blocks")
for k, v in checkpoint['model_state_dict'].items():
if 'blocs' in k:
k = k.replace('blocs', 'blocks')
new_dict[k] = v

net.load_state_dict(new_dict)
self.epoch = checkpoint["epoch"]
self.epoch = checkpoint['epoch']
net.eval()
print("\nModel state restored from {:s}.".format(chkp_path))

@@ -103,10 +105,8 @@ class ModelVisualizer:
# First choose the visualized deformations
##########################################

print(
"\nList of the deformable convolution available (chosen one highlighted in green)"
)
fmt_str = " {:}{:2d} > KPConv(r={:.3f}, Din={:d}, Dout={:d}){:}"
print('\nList of the deformable convolution available (chosen one highlighted in green)')
fmt_str = ' {:}{:2d} > KPConv(r={:.3f}, Din={:d}, Dout={:d}){:}'
deform_convs = []
for m in net.modules():
if isinstance(m, KPConv) and m.deformable:
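Note: the restore code above rewrites old parameter names ('blocs' -> 'blocks') before calling `load_state_dict`. The same idea as a small generic helper, assuming a plain PyTorch checkpoint dictionary with a 'model_state_dict' entry:

    import torch

    def load_with_renamed_keys(net, chkp_path, old='blocs', new='blocks'):
        # Load on CPU, rename mismatching keys, then restore the weights.
        checkpoint = torch.load(chkp_path, map_location='cpu')
        renamed = {k.replace(old, new): v for k, v in checkpoint['model_state_dict'].items()}
        net.load_state_dict(renamed)
        net.eval()
        return checkpoint.get('epoch', 0)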
@ -114,34 +114,27 @@ class ModelVisualizer:
|
|||
color = bcolors.OKGREEN
|
||||
else:
|
||||
color = bcolors.FAIL
|
||||
print(
|
||||
fmt_str.format(
|
||||
color,
|
||||
len(deform_convs),
|
||||
m.radius,
|
||||
m.in_channels,
|
||||
m.out_channels,
|
||||
bcolors.ENDC,
|
||||
)
|
||||
)
|
||||
print(fmt_str.format(color, len(deform_convs), m.radius, m.in_channels, m.out_channels, bcolors.ENDC))
|
||||
deform_convs.append(m)
|
||||
|
||||
################
|
||||
# Initialization
|
||||
################
|
||||
|
||||
print("\n****************************************************\n")
|
||||
print('\n****************************************************\n')
|
||||
|
||||
# Loop variables
|
||||
time.time()
|
||||
t0 = time.time()
|
||||
t = [time.time()]
|
||||
time.time()
|
||||
np.zeros(1)
|
||||
last_display = time.time()
|
||||
mean_dt = np.zeros(1)
|
||||
count = 0
|
||||
|
||||
# Start training loop
|
||||
for epoch in range(config.max_epoch):
|
||||
|
||||
for batch in loader:
|
||||
|
||||
##################
|
||||
# Processing batch
|
||||
##################
|
||||
|
@ -150,20 +143,16 @@ class ModelVisualizer:
|
|||
t = t[-1:]
|
||||
t += [time.time()]
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
batch.to(self.device)
|
||||
|
||||
# Forward pass
|
||||
net(batch, config)
|
||||
original_KP = (
|
||||
deform_convs[deform_idx].kernel_points.cpu().detach().numpy()
|
||||
)
|
||||
stacked_deformed_KP = (
|
||||
deform_convs[deform_idx].deformed_KP.cpu().detach().numpy()
|
||||
)
|
||||
outputs = net(batch, config)
|
||||
original_KP = deform_convs[deform_idx].kernel_points.cpu().detach().numpy()
|
||||
stacked_deformed_KP = deform_convs[deform_idx].deformed_KP.cpu().detach().numpy()
|
||||
count += batch.lengths[0].shape[0]
|
||||
|
||||
if "cuda" in self.device.type:
|
||||
if 'cuda' in self.device.type:
|
||||
torch.cuda.synchronize(self.device)
|
||||
|
||||
# Find layer
|
||||
|
@ -182,23 +171,17 @@ class ModelVisualizer:
|
|||
lookuptrees = []
|
||||
i0 = 0
|
||||
for b_i, length in enumerate(batch.lengths[0]):
|
||||
in_points.append(
|
||||
batch.points[0][i0 : i0 + length].cpu().detach().numpy()
|
||||
)
|
||||
in_points.append(batch.points[0][i0:i0 + length].cpu().detach().numpy())
|
||||
if batch.features.shape[1] == 4:
|
||||
in_colors.append(
|
||||
batch.features[i0 : i0 + length, 1:].cpu().detach().numpy()
|
||||
)
|
||||
in_colors.append(batch.features[i0:i0 + length, 1:].cpu().detach().numpy())
|
||||
else:
|
||||
in_colors.append(None)
|
||||
i0 += length
|
||||
|
||||
i0 = 0
|
||||
for b_i, length in enumerate(batch.lengths[l]):
|
||||
points.append(
|
||||
batch.points[l][i0 : i0 + length].cpu().detach().numpy()
|
||||
)
|
||||
deformed_KP.append(stacked_deformed_KP[i0 : i0 + length])
|
||||
points.append(batch.points[l][i0:i0 + length].cpu().detach().numpy())
|
||||
deformed_KP.append(stacked_deformed_KP[i0:i0 + length])
|
||||
lookuptrees.append(KDTree(points[-1]))
|
||||
i0 += length
|
||||
|
||||
|
@ -207,9 +190,7 @@ class ModelVisualizer:
|
|||
###########################
|
||||
|
||||
# Create figure for features
|
||||
fig1 = mlab.figure(
|
||||
"Deformations", bgcolor=(1.0, 1.0, 1.0), size=(1280, 920)
|
||||
)
|
||||
fig1 = mlab.figure('Deformations', bgcolor=(1.0, 1.0, 1.0), size=(1280, 920))
|
||||
fig1.scene.parallel_projection = False
|
||||
|
||||
# Indices
|
||||
|
@ -223,41 +204,26 @@ class ModelVisualizer:
|
|||
aim_point = np.zeros((1, 3))
|
||||
|
||||
def picker_callback(picker):
|
||||
"""Picker callback: this get called when on pick events."""
|
||||
""" Picker callback: this get called when on pick events.
|
||||
"""
|
||||
global plots, aim_point
|
||||
|
||||
if "in_points" in plots:
|
||||
if plots["in_points"].actor.actor._vtk_obj in [
|
||||
o._vtk_obj for o in picker.actors
|
||||
]:
|
||||
point_rez = (
|
||||
plots["in_points"]
|
||||
.glyph.glyph_source.glyph_source.output.points.to_array()
|
||||
.shape[0]
|
||||
)
|
||||
if 'in_points' in plots:
|
||||
if plots['in_points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
|
||||
point_rez = plots['in_points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
|
||||
new_point_i = int(np.floor(picker.point_id / point_rez))
|
||||
if new_point_i < len(plots["in_points"].mlab_source.points):
|
||||
if new_point_i < len(plots['in_points'].mlab_source.points):
|
||||
# Get closest point in the layer we are interested in
|
||||
aim_point = plots["in_points"].mlab_source.points[
|
||||
new_point_i : new_point_i + 1
|
||||
]
|
||||
aim_point = plots['in_points'].mlab_source.points[new_point_i:new_point_i + 1]
|
||||
update_scene()
|
||||
|
||||
if "points" in plots:
|
||||
if plots["points"].actor.actor._vtk_obj in [
|
||||
o._vtk_obj for o in picker.actors
|
||||
]:
|
||||
point_rez = (
|
||||
plots["points"]
|
||||
.glyph.glyph_source.glyph_source.output.points.to_array()
|
||||
.shape[0]
|
||||
)
|
||||
if 'points' in plots:
|
||||
if plots['points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
|
||||
point_rez = plots['points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
|
||||
new_point_i = int(np.floor(picker.point_id / point_rez))
|
||||
if new_point_i < len(plots["points"].mlab_source.points):
|
||||
if new_point_i < len(plots['points'].mlab_source.points):
|
||||
# Get closest point in the layer we are interested in
|
||||
aim_point = plots["points"].mlab_source.points[
|
||||
new_point_i : new_point_i + 1
|
||||
]
|
||||
aim_point = plots['points'].mlab_source.points[new_point_i:new_point_i + 1]
|
||||
update_scene()
|
||||
|
||||
def update_scene():
|
||||
|
@ -277,68 +243,61 @@ class ModelVisualizer:
|
|||
p = points[obj_i]
|
||||
|
||||
# Rescale points for visu
|
||||
p = p * 1.5 / config.in_radius
|
||||
p = (p * 1.5 / config.in_radius)
|
||||
|
||||
|
||||
# Show point cloud
|
||||
if show_in_p <= 1:
|
||||
plots["points"] = mlab.points3d(
|
||||
p[:, 0],
|
||||
plots['points'] = mlab.points3d(p[:, 0],
|
||||
p[:, 1],
|
||||
p[:, 2],
|
||||
resolution=8,
|
||||
scale_factor=p_scale,
|
||||
scale_mode="none",
|
||||
scale_mode='none',
|
||||
color=(0, 1, 1),
|
||||
figure=fig1,
|
||||
)
|
||||
figure=fig1)
|
||||
|
||||
if show_in_p >= 1:
|
||||
|
||||
# Get points and colors
|
||||
in_p = in_points[obj_i]
|
||||
in_p = in_p * 1.5 / config.in_radius
|
||||
in_p = (in_p * 1.5 / config.in_radius)
|
||||
|
||||
# Color point cloud if possible
|
||||
in_c = in_colors[obj_i]
|
||||
if in_c is not None:
|
||||
|
||||
# Primitives
|
||||
scalars = np.arange(
|
||||
len(in_p)
|
||||
) # Key point: set an integer for each point
|
||||
scalars = np.arange(len(in_p)) # Key point: set an integer for each point
|
||||
|
||||
# Define color table (including alpha), which must be uint8 and [0,255]
|
||||
colors = np.hstack((in_c, np.ones_like(in_c[:, :1])))
|
||||
colors = (colors * 255).astype(np.uint8)
|
||||
|
||||
plots["in_points"] = mlab.points3d(
|
||||
in_p[:, 0],
|
||||
plots['in_points'] = mlab.points3d(in_p[:, 0],
|
||||
in_p[:, 1],
|
||||
in_p[:, 2],
|
||||
scalars,
|
||||
resolution=8,
|
||||
scale_factor=p_scale * 0.8,
|
||||
scale_mode="none",
|
||||
figure=fig1,
|
||||
)
|
||||
plots[
|
||||
"in_points"
|
||||
].module_manager.scalar_lut_manager.lut.table = colors
|
||||
scale_factor=p_scale*0.8,
|
||||
scale_mode='none',
|
||||
figure=fig1)
|
||||
plots['in_points'].module_manager.scalar_lut_manager.lut.table = colors
|
||||
|
||||
else:
|
||||
plots["in_points"] = mlab.points3d(
|
||||
in_p[:, 0],
|
||||
|
||||
plots['in_points'] = mlab.points3d(in_p[:, 0],
|
||||
in_p[:, 1],
|
||||
in_p[:, 2],
|
||||
resolution=8,
|
||||
scale_factor=p_scale * 0.8,
|
||||
scale_mode="none",
|
||||
figure=fig1,
|
||||
)
|
||||
scale_factor=p_scale*0.8,
|
||||
scale_mode='none',
|
||||
figure=fig1)
|
||||
|
||||
|
||||
# Get KP locations
|
||||
rescaled_aim_point = aim_point * config.in_radius / 1.5
|
||||
point_i = lookuptrees[obj_i].query(
|
||||
rescaled_aim_point, return_distance=False
|
||||
)[0][0]
|
||||
point_i = lookuptrees[obj_i].query(rescaled_aim_point, return_distance=False)[0][0]
|
||||
if offsets:
|
||||
KP = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
|
||||
scals = np.ones_like(KP[:, 0])
|
||||
|
@ -346,46 +305,35 @@ class ModelVisualizer:
|
|||
KP = points[obj_i][point_i] + original_KP
|
||||
scals = np.zeros_like(KP[:, 0])
|
||||
|
||||
KP = KP * 1.5 / config.in_radius
|
||||
KP = (KP * 1.5 / config.in_radius)
|
||||
|
||||
plots["KP"] = mlab.points3d(
|
||||
KP[:, 0],
|
||||
plots['KP'] = mlab.points3d(KP[:, 0],
|
||||
KP[:, 1],
|
||||
KP[:, 2],
|
||||
scals,
|
||||
colormap="autumn",
|
||||
colormap='autumn',
|
||||
resolution=8,
|
||||
scale_factor=1.2 * p_scale,
|
||||
scale_mode="none",
|
||||
scale_factor=1.2*p_scale,
|
||||
scale_mode='none',
|
||||
vmin=0,
|
||||
vmax=1,
|
||||
figure=fig1,
|
||||
)
|
||||
figure=fig1)
|
||||
|
||||
|
||||
if True:
|
||||
plots["center"] = mlab.points3d(
|
||||
p[point_i, 0],
|
||||
plots['center'] = mlab.points3d(p[point_i, 0],
|
||||
p[point_i, 1],
|
||||
p[point_i, 2],
|
||||
scale_factor=1.1 * p_scale,
|
||||
scale_mode="none",
|
||||
scale_factor=1.1*p_scale,
|
||||
scale_mode='none',
|
||||
color=(0, 1, 0),
|
||||
figure=fig1,
|
||||
)
|
||||
figure=fig1)
|
||||
|
||||
# New title
|
||||
plots["title"] = mlab.title(
|
||||
str(obj_i), color=(0, 0, 0), size=0.3, height=0.01
|
||||
)
|
||||
text = (
|
||||
"<--- (press g for previous)"
|
||||
+ 50 * " "
|
||||
+ "(press h for next) --->"
|
||||
)
|
||||
plots["text"] = mlab.text(
|
||||
0.01, 0.01, text, color=(0, 0, 0), width=0.98
|
||||
)
|
||||
plots["orient"] = mlab.orientation_axes()
|
||||
plots['title'] = mlab.title(str(obj_i), color=(0, 0, 0), size=0.3, height=0.01)
|
||||
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
|
||||
plots['text'] = mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
|
||||
plots['orient'] = mlab.orientation_axes()
|
||||
|
||||
# Set the saved view
|
||||
mlab.view(*v)
|
||||
|
@@ -399,10 +347,12 @@ class ModelVisualizer:
# Get KP locations

KP_def = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
KP_def = KP_def * 1.5 / config.in_radius
KP_def = (KP_def * 1.5 / config.in_radius)
KP_def_color = (1, 0, 0)

KP_rigid = points[obj_i][point_i] + original_KP
KP_rigid = KP_rigid * 1.5 / config.in_radius
KP_rigid = (KP_rigid * 1.5 / config.in_radius)
KP_rigid_color = (1, 0.7, 0)

if offsets:
t_list = np.linspace(0, 1, 150, dtype=np.float32)
@@ -412,12 +362,10 @@ class ModelVisualizer:
@mlab.animate(delay=10)
def anim():
for t in t_list:
plots["KP"].mlab_source.set(
x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0],
plots['KP'].mlab_source.set(x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0],
y=t * KP_def[:, 1] + (1 - t) * KP_rigid[:, 1],
z=t * KP_def[:, 2] + (1 - t) * KP_rigid[:, 2],
scalars=t * np.ones_like(KP_def[:, 0]),
)
scalars=t * np.ones_like(KP_def[:, 0]))

yield
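Note: the animation above linearly interpolates the displayed kernel points between their rigid and deformed positions. The interpolation itself is just a convex combination, sketched here without the mayavi plumbing:

    import numpy as np

    def interpolate_kernels(KP_rigid, KP_def, n_steps=150):
        # Yields n_steps point sets going from the rigid to the deformed kernel.
        for t in np.linspace(0.0, 1.0, n_steps, dtype=np.float32):
            yield t * KP_def + (1.0 - t) * KP_rigid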
@ -428,63 +376,58 @@ class ModelVisualizer:
|
|||
def keyboard_callback(vtk_obj, event):
|
||||
global obj_i, point_i, offsets, p_scale, show_in_p
|
||||
|
||||
if vtk_obj.GetKeyCode() in ["b", "B"]:
|
||||
if vtk_obj.GetKeyCode() in ['b', 'B']:
|
||||
p_scale /= 1.5
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["n", "N"]:
|
||||
elif vtk_obj.GetKeyCode() in ['n', 'N']:
|
||||
p_scale *= 1.5
|
||||
update_scene()
|
||||
|
||||
if vtk_obj.GetKeyCode() in ["g", "G"]:
|
||||
if vtk_obj.GetKeyCode() in ['g', 'G']:
|
||||
obj_i = (obj_i - 1) % len(deformed_KP)
|
||||
point_i = 0
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["h", "H"]:
|
||||
elif vtk_obj.GetKeyCode() in ['h', 'H']:
|
||||
obj_i = (obj_i + 1) % len(deformed_KP)
|
||||
point_i = 0
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["k", "K"]:
|
||||
elif vtk_obj.GetKeyCode() in ['k', 'K']:
|
||||
offsets = not offsets
|
||||
animate_kernel()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["z", "Z"]:
|
||||
elif vtk_obj.GetKeyCode() in ['z', 'Z']:
|
||||
show_in_p = (show_in_p + 1) % 3
|
||||
update_scene()
|
||||
|
||||
elif vtk_obj.GetKeyCode() in ["0"]:
|
||||
print("Saving")
|
||||
elif vtk_obj.GetKeyCode() in ['0']:
|
||||
|
||||
print('Saving')
|
||||
|
||||
# Find a new name
|
||||
file_i = 0
|
||||
file_name = "KP_{:03d}.ply".format(file_i)
|
||||
files = [f for f in listdir("KP_clouds") if f.endswith(".ply")]
|
||||
file_name = 'KP_{:03d}.ply'.format(file_i)
|
||||
files = [f for f in listdir('KP_clouds') if f.endswith('.ply')]
|
||||
while file_name in files:
|
||||
file_i += 1
|
||||
file_name = "KP_{:03d}.ply".format(file_i)
|
||||
file_name = 'KP_{:03d}.ply'.format(file_i)
|
||||
|
||||
KP_deform = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
|
||||
KP_normal = points[obj_i][point_i] + original_KP
|
||||
|
||||
# Save
|
||||
write_ply(
|
||||
join("KP_clouds", file_name),
|
||||
write_ply(join('KP_clouds', file_name),
|
||||
[in_points[obj_i], in_colors[obj_i]],
|
||||
["x", "y", "z", "red", "green", "blue"],
|
||||
)
|
||||
write_ply(
|
||||
join("KP_clouds", "KP_{:03d}_deform.ply".format(file_i)),
|
||||
['x', 'y', 'z', 'red', 'green', 'blue'])
|
||||
write_ply(join('KP_clouds', 'KP_{:03d}_deform.ply'.format(file_i)),
|
||||
[KP_deform],
|
||||
["x", "y", "z"],
|
||||
)
|
||||
write_ply(
|
||||
join("KP_clouds", "KP_{:03d}_normal.ply".format(file_i)),
|
||||
['x', 'y', 'z'])
|
||||
write_ply(join('KP_clouds', 'KP_{:03d}_normal.ply'.format(file_i)),
|
||||
[KP_normal],
|
||||
["x", "y", "z"],
|
||||
)
|
||||
print("OK")
|
||||
['x', 'y', 'z'])
|
||||
print('OK')
|
||||
|
||||
return
|
||||
|
||||
|
@@ -492,7 +435,7 @@ class ModelVisualizer:
pick_func = fig1.on_mouse_pick(picker_callback)
pick_func.tolerance = 0.01
update_scene()
fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
mlab.show()

return
@@ -502,12 +445,13 @@ class ModelVisualizer:


def show_ModelNet_models(all_points):

###########################
# Interactive visualization
###########################

# Create figure for features
fig1 = mlab.figure("Models", bgcolor=(1, 1, 1), size=(1000, 800))
fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800))
fig1.scene.parallel_projection = False

# Indices

@@ -515,6 +459,7 @@ def show_ModelNet_models(all_points):
file_i = 0

def update_scene():

# clear figure
mlab.clf(fig1)
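Note: `show_ModelNet_models` expects a list of (N_i, 3) point arrays and lets you cycle through them with the g/h keys. A hypothetical call with stand-in data (the real clouds would come from the dataset loader):

    import numpy as np

    # Two random stand-in clouds, each with 1024 points
    clouds = [np.random.randn(1024, 3).astype(np.float32) for _ in range(2)]
    # show_ModelNet_models(clouds)   # opens the interactive mayavi window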
@@ -525,19 +470,17 @@ def show_ModelNet_models(all_points):
points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0

# Show point clouds colorized with activations
mlab.points3d(
points[:, 0],
activations = mlab.points3d(points[:, 0],
points[:, 1],
points[:, 2],
points[:, 2],
scale_factor=3.0,
scale_mode="none",
figure=fig1,
)
scale_mode='none',
figure=fig1)

# New title
mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
text = "<--- (press g for previous)" + 50 * " " + "(press h for next) --->"
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
mlab.orientation_axes()
@@ -546,11 +489,13 @@ def show_ModelNet_models(all_points):
def keyboard_callback(vtk_obj, event):
global file_i

if vtk_obj.GetKeyCode() in ["g", "G"]:
if vtk_obj.GetKeyCode() in ['g', 'G']:

file_i = (file_i - 1) % len(all_points)
update_scene()

elif vtk_obj.GetKeyCode() in ["h", "H"]:
elif vtk_obj.GetKeyCode() in ['h', 'H']:

file_i = (file_i + 1) % len(all_points)
update_scene()

@@ -558,5 +503,29 @@ def show_ModelNet_models(all_points):

# Draw a first plot
update_scene()
fig1.scene.interactor.add_observer("KeyPressEvent", keyboard_callback)
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
mlab.show()
@@ -22,12 +22,15 @@
#

# Common libs
import signal
import os
import numpy as np
import sys
import torch

# Dataset
from datasetss.ModelNet40 import *
from datasetss.S3DIS import *
from datasets.ModelNet40 import *
from datasets.S3DIS import *
from torch.utils.data import DataLoader

from utils.config import Config
@@ -41,25 +44,20 @@ from models.architectures import KPCNN, KPFCNN
# \***************/
#


def model_choice(chosen_log):

###########################
# Call the test initializer
###########################

# Automatically retrieve the last trained model
if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]:
if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']:

# Dataset name
test_dataset = "_".join(chosen_log.split("_")[1:])
test_dataset = '_'.join(chosen_log.split('_')[1:])

# List all training logs
logs = np.sort(
[
os.path.join("results", f)
for f in os.listdir("results")
if f.startswith("Log")
]
)
logs = np.sort([os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log')])

# Find the last log of asked dataset
for log in logs[::-1]:
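Note: `model_choice` either returns the given path untouched or resolves a 'last_XXX' alias to the newest matching log under `results/`. A usage sketch (the paths are hypothetical):

    # Resolve the most recent S3DIS training log
    chosen_log = model_choice('last_S3DIS')      # e.g. 'results/Log_2020-04-23_19-42-18'

    # Or point directly at a specific run
    chosen_log = model_choice('results/Log_2020-04-23_19-42-18')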
@@ -69,12 +67,12 @@ def model_choice(chosen_log):
chosen_log = log
break

if chosen_log in ["last_ModelNet40", "last_ShapeNetPart", "last_S3DIS"]:
if chosen_log in ['last_ModelNet40', 'last_ShapeNetPart', 'last_S3DIS']:
raise ValueError('No log of the dataset "' + test_dataset + '" found')

# Check if log exists
if not os.path.exists(chosen_log):
raise ValueError("The given log does not exists: " + chosen_log)
raise ValueError('The given log does not exists: ' + chosen_log)

return chosen_log
@@ -85,7 +83,8 @@ def model_choice(chosen_log):
# \***************/
#

if __name__ == "__main__":
if __name__ == '__main__':

###############################
# Choose the model to visualize
###############################

@@ -95,7 +94,7 @@ if __name__ == "__main__":
# > 'last_XXX': Automatically retrieve the last trained model on dataset XXX
# > 'results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model

chosen_log = "results/Log_2020-04-23_19-42-18"
chosen_log = 'results/Log_2020-04-23_19-42-18'

# Choose the index of the checkpoint to load OR None if you want to load the current checkpoint
chkp_idx = None
@@ -111,25 +110,25 @@ if __name__ == "__main__":
############################

# Set which gpu is going to be used
GPU_ID = "0"
GPU_ID = '0'

# Set GPU visible device
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_ID
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

###############
# Previous chkp
###############

# Find all checkpoints in the chosen training folder
chkp_path = os.path.join(chosen_log, "checkpoints")
chkps = [f for f in os.listdir(chkp_path) if f[:4] == "chkp"]
chkp_path = os.path.join(chosen_log, 'checkpoints')
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']

# Find which snapshot to restore
if chkp_idx is None:
chosen_chkp = "current_chkp.tar"
chosen_chkp = 'current_chkp.tar'
else:
chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join(chosen_log, "checkpoints", chosen_chkp)
chosen_chkp = os.path.join(chosen_log, 'checkpoints', chosen_chkp)

# Initialize configuration class
config = Config()
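Note: `chkp_idx` selects among the saved snapshots: `None` falls back to `current_chkp.tar`, while an integer indexes the sorted `chkp_*.tar` files. For instance, with hypothetical checkpoint names:

    import numpy as np

    chkps = ['chkp_0100.tar', 'chkp_0300.tar', 'chkp_0200.tar']
    print(np.sort(chkps)[1])   # 'chkp_0200.tar' -> chkp_idx = 1 picks the second snapshot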
@@ -151,54 +150,53 @@ if __name__ == "__main__":
##############

print()
print("Data Preparation")
print("****************")
print('Data Preparation')
print('****************')

# Initiate dataset
if config.dataset.startswith("ModelNet40"):
if config.dataset.startswith('ModelNet40'):
test_dataset = ModelNet40Dataset(config, train=False)
test_sampler = ModelNet40Sampler(test_dataset)
collate_fn = ModelNet40Collate
elif config.dataset == "S3DIS":
test_dataset = S3DISDataset(config, set="validation", use_potentials=True)
elif config.dataset == 'S3DIS':
test_dataset = S3DISDataset(config, set='validation', use_potentials=True)
test_sampler = S3DISSampler(test_dataset)
collate_fn = S3DISCollate
else:
raise ValueError("Unsupported dataset : " + config.dataset)
raise ValueError('Unsupported dataset : ' + config.dataset)

# Data loader
test_loader = DataLoader(
test_dataset,
test_loader = DataLoader(test_dataset,
batch_size=1,
sampler=test_sampler,
collate_fn=collate_fn,
num_workers=config.input_threads,
pin_memory=True,
)
pin_memory=True)

# Calibrate samplers
test_sampler.calibration(test_loader, verbose=True)

print("\nModel Preparation")
print("*****************")
print('\nModel Preparation')
print('*****************')

# Define network model
t1 = time.time()
if config.dataset_task == "classification":
if config.dataset_task == 'classification':
net = KPCNN(config)
elif config.dataset_task in ["cloud_segmentation", "slam_segmentation"]:
elif config.dataset_task in ['cloud_segmentation', 'slam_segmentation']:
net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels)
else:
raise ValueError(
"Unsupported dataset_task for deformation visu: " + config.dataset_task
)
raise ValueError('Unsupported dataset_task for deformation visu: ' + config.dataset_task)

# Define a visualizer class
visualizer = ModelVisualizer(net, config, chkp_path=chosen_chkp, on_gpu=False)
print("Done in {:.1f}s\n".format(time.time() - t1))
print('Done in {:.1f}s\n'.format(time.time() - t1))

print("\nStart visualization")
print("*******************")
print('\nStart visualization')
print('*******************')

# Training
visualizer.show_deformable_kernels(net, test_loader, config, deform_idx)