Corrections

parent 7a4530f8e0
commit b4e1a9dcc9
@@ -805,38 +805,10 @@ class ModelNet40CustomBatch:
         return all_p_list


 def ModelNet40Collate(batch_data):
     return ModelNet40CustomBatch(batch_data)


-class ModelNet40WorkerInitDebug():
-    """Callable class that Initializes workers."""
-
-    def __init__(self, dataset):
-        self.dataset = dataset
-        return
-
-    def __call__(self, worker_id):
-
-        # Print workers info
-        worker_info = get_worker_info()
-        print(worker_info)
-
-        # Get associated dataset
-        dataset = worker_info.dataset  # the dataset copy in this worker process
-
-        # In windows, each worker has its own copy of the dataset. In Linux, this is shared in memory
-        print(dataset.input_labels.__array_interface__['data'])
-        print(worker_info.dataset.input_labels.__array_interface__['data'])
-        print(self.dataset.input_labels.__array_interface__['data'])
-
-        # configure the dataset to only process the split workload
-
-        return
-
-
 # ----------------------------------------------------------------------------------------------------------------------
 #
 # Debug functions
@@ -994,3 +966,30 @@ def debug_batch_and_neighbors_calib(dataset, sampler, loader):
     _, counts = np.unique(dataset.input_labels, return_counts=True)
     print(counts)


+class ModelNet40WorkerInitDebug:
+    """Callable class that initializes DataLoader workers."""
+
+    def __init__(self, dataset):
+        self.dataset = dataset
+        return
+
+    def __call__(self, worker_id):
+
+        # Print worker info
+        worker_info = get_worker_info()
+        print(worker_info)
+
+        # Get the associated dataset
+        dataset = worker_info.dataset  # the dataset copy in this worker process
+
+        # On Windows each worker has its own copy of the dataset; on Linux it is shared in memory
+        print(dataset.input_labels.__array_interface__['data'])
+        print(worker_info.dataset.input_labels.__array_interface__['data'])
+        print(self.dataset.input_labels.__array_interface__['data'])
+
+        # Configure the dataset to only process the split workload
+
+        return
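Aside (not part of the commit): a minimal sketch of how a callable like ModelNet40WorkerInitDebug is typically plugged into a PyTorch DataLoader, so that every worker prints the data pointer of its copy of input_labels. The ModelNet40Dataset constructor and its config argument are assumptions here; only ModelNet40Collate and the class above come from this file.

from torch.utils.data import DataLoader

train_dataset = ModelNet40Dataset(config, train=True)      # assumed dataset constructor and config object

loader = DataLoader(train_dataset,
                    batch_size=1,
                    num_workers=4,
                    collate_fn=ModelNet40Collate,
                    worker_init_fn=ModelNet40WorkerInitDebug(train_dataset))

# Each worker prints its worker_info and the three data pointers once, before yielding batches
for batch in loader:
    break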
@@ -28,7 +28,6 @@ import numpy as np
 import pickle
 import torch
 import math
-#from mayavi import mlab
 from multiprocessing import Lock


@@ -877,7 +876,6 @@ class S3DISDataset(PointCloudDataset):
         return np.vstack((data['x'], data['y'], data['z'])).T

-
 # ----------------------------------------------------------------------------------------------------------------------
 #
 # Utility classes definition
@@ -28,7 +28,6 @@ import numpy as np
 import pickle
 import torch
 import yaml
-#from mayavi import mlab
 from multiprocessing import Lock


@@ -245,9 +244,6 @@ class SemanticKittiDataset(PointCloudDataset):
         merged_labels = np.zeros((0,), dtype=np.int32)
         merged_coords = np.zeros((0, 4), dtype=np.float32)

-        # In case of validation also keep original point and reproj indices
-
-
         # Get center of the first frame in world coordinates
         p_origin = np.zeros((1, 4))
         p_origin[0, 3] = 1
@@ -346,11 +342,6 @@ class SemanticKittiDataset(PointCloudDataset):
         # Merge n_frames together
         #########################

-        # Too see yielding speed with debug timings method, collapse points (reduce mapping time to nearly 0)
-        #merged_points = merged_points[:100, :]
-        #merged_labels = merged_labels[:100]
-        #merged_points *= 0.1
-
         # Subsample merged frames
         in_pts, in_fts, in_lbls = grid_subsampling(merged_points,
                                                    features=merged_coords,
@@ -455,8 +446,8 @@ class SemanticKittiDataset(PointCloudDataset):
             else:
                 raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)')

             t += [time.time()]

             #######################
             # Create network inputs
             #######################
@@ -546,27 +537,6 @@ class SemanticKittiDataset(PointCloudDataset):
                     ti += 1
                 print('\n************************\n')

-            # Timings: (in test configuration)
-            # Lock ...... 0.1ms
-            # Init ...... 0.0ms
-            # Load ...... 40.0ms
-            # subs ...... 143.6ms
-            # drop ...... 4.6ms
-            # reproj .... 297.4ms
-            # augment ... 7.5ms
-            # stack ..... 0.0ms
-            # concat .... 1.4ms
-            # input ..... 816.0ms
-            # stack ..... 0.0ms
-
-            # TODO: Where can we gain time for the robot real time test?
-            #  > Load: no disk read necessary + pose useless if we only use one frame for testing
-            #  > Drop: We can drop even more points. Random choice could be faster without replace=False
-            #  > reproj: No reprojection needed
-            #  > Augment: See which data augment we want at test time
-            #  > input: MAIN BOTTLENECK. We need to see if we can do faster, maybe with some parallelisation. neighbors
-            #           and subsampling accelerated with lidar frame order
-
         return [self.config.num_layers] + input_list

     def load_calib_poses(self):
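Aside (not part of the commit): the timing breakdown deleted above is the kind of output obtained by appending a timestamp after every stage of item generation and printing the differences; the t += [time.time()] calls that feed it are kept by the diff. A small illustrative sketch of that pattern, with placeholder stage names and work:

import time
import numpy as np

t = [time.time()]

# ... load the frame from disk ...
t += [time.time()]

# ... subsample the merged frames ...
t += [time.time()]

# ... build the network inputs ...
t += [time.time()]

for name, dt in zip(['Load', 'subs', 'input'], np.diff(t)):
    print('# {:s} ...... {:.1f}ms'.format(name, 1000 * dt))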
@@ -744,6 +714,12 @@ class SemanticKittiDataset(PointCloudDataset):
         return poses


+# ----------------------------------------------------------------------------------------------------------------------
+#
+# Utility classes definition
+# \********************************/
+
+
 class SemanticKittiSampler(Sampler):
     """Sampler for SemanticKitti"""

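Aside (not part of the commit): a Sampler like the one declared here only has to yield dataset indices and report how many it will yield. A generic sketch of that interface with an assumed epoch_n parameter; it is not the actual SemanticKittiSampler, which implements the project's own sampling strategy.

import numpy as np
from torch.utils.data import Sampler

class RandomSubsetSampler(Sampler):
    """Toy sampler: yields a random subset of indices at every epoch."""

    def __init__(self, dataset, epoch_n):
        self.dataset = dataset
        self.epoch_n = epoch_n          # number of elements sampled per epoch (assumed parameter)

    def __iter__(self):
        order = np.random.permutation(len(self.dataset))[:self.epoch_n]
        return iter(order.tolist())

    def __len__(self):
        return self.epoch_n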
@@ -1390,6 +1366,12 @@ def SemanticKittiCollate(batch_data):
     return SemanticKittiCustomBatch(batch_data)


+# ----------------------------------------------------------------------------------------------------------------------
+#
+# Debug functions
+# \*********************/
+
+
 def debug_timing(dataset, loader):
     """Timing of generator function"""

@@ -74,7 +74,8 @@ def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
                                           verbose=verbose)


-def batch_grid_subsampling(points, batches_len, features=None, labels=None, sampleDl=0.1, max_p=0, verbose=0):
+def batch_grid_subsampling(points, batches_len, features=None, labels=None,
+                           sampleDl=0.1, max_p=0, verbose=0, random_grid_orient=True):
     """
     CPP wrapper for a grid subsampling (method = barycenter for points and features)
     :param points: (N, 3) matrix of input points
@@ -85,34 +86,100 @@ def batch_grid_subsampling(points, batches_len, features=None, labels=None, sampleDl=0.1, max_p=0, verbose=0):
     :return: subsampled points, with features and/or labels depending of the input
     """

+    R = None
+    B = len(batches_len)
+    if random_grid_orient:
+
+        ########################################################
+        # Create a random rotation matrix for each batch element
+        ########################################################
+
+        # Choose two random angles for the first vector in polar coordinates
+        theta = np.random.rand(B) * 2 * np.pi
+        phi = (np.random.rand(B) - 0.5) * np.pi
+
+        # Create the first vector in cartesian coordinates
+        u = np.vstack([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
+
+        # Choose a random rotation angle
+        alpha = np.random.rand(B) * 2 * np.pi
+
+        # Create the rotation matrix with this vector and angle
+        R = create_3D_rotations(u.T, alpha).astype(np.float32)
+
+        #################
+        # Apply rotations
+        #################
+
+        i0 = 0
+        points = points.copy()
+        for bi, length in enumerate(batches_len):
+            # Apply the rotation
+            points[i0:i0 + length, :] = np.sum(np.expand_dims(points[i0:i0 + length, :], 2) * R[bi], axis=1)
+            i0 += length
+
+    #######################
+    # Subsample and realign
+    #######################
+
     if (features is None) and (labels is None):
-        return cpp_subsampling.subsample_batch(points,
-                                               batches_len,
-                                               sampleDl=sampleDl,
-                                               max_p=max_p,
-                                               verbose=verbose)
+        s_points, s_len = cpp_subsampling.subsample_batch(points,
                                                           batches_len,
                                                           sampleDl=sampleDl,
                                                           max_p=max_p,
                                                           verbose=verbose)
+        if random_grid_orient:
+            i0 = 0
+            for bi, length in enumerate(s_len):
+                s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1)
+                i0 += length
+        return s_points, s_len

     elif (labels is None):
-        return cpp_subsampling.subsample_batch(points,
-                                               batches_len,
-                                               features=features,
-                                               sampleDl=sampleDl,
-                                               max_p=max_p,
-                                               verbose=verbose)
+        s_points, s_len, s_features = cpp_subsampling.subsample_batch(points,
                                                                       batches_len,
                                                                       features=features,
                                                                       sampleDl=sampleDl,
                                                                       max_p=max_p,
                                                                       verbose=verbose)
+        if random_grid_orient:
+            i0 = 0
+            for bi, length in enumerate(s_len):
+                # Apply the rotation
+                s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1)
+                i0 += length
+        return s_points, s_len, s_features

     elif (features is None):
-        return cpp_subsampling.subsample_batch(points,
-                                               batches_len,
-                                               classes=labels,
-                                               sampleDl=sampleDl,
-                                               max_p=max_p,
-                                               verbose=verbose)
+        s_points, s_len, s_labels = cpp_subsampling.subsample_batch(points,
                                                                     batches_len,
                                                                     classes=labels,
                                                                     sampleDl=sampleDl,
                                                                     max_p=max_p,
                                                                     verbose=verbose)
+        if random_grid_orient:
+            i0 = 0
+            for bi, length in enumerate(s_len):
+                # Apply the rotation
+                s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1)
+                i0 += length
+        return s_points, s_len, s_labels

     else:
-        return cpp_subsampling.subsample_batch(points,
-                                               batches_len,
-                                               features=features,
-                                               classes=labels,
-                                               sampleDl=sampleDl,
-                                               max_p=max_p,
-                                               verbose=verbose)
+        s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(points,
                                                                                 batches_len,
                                                                                 features=features,
                                                                                 classes=labels,
                                                                                 sampleDl=sampleDl,
                                                                                 max_p=max_p,
                                                                                 verbose=verbose)
+        if random_grid_orient:
+            i0 = 0
+            for bi, length in enumerate(s_len):
+                # Apply the rotation
+                s_points[i0:i0 + length, :] = np.sum(np.expand_dims(s_points[i0:i0 + length, :], 2) * R[bi].T, axis=1)
+                i0 += length
+        return s_points, s_len, s_features, s_labels


 def batch_neighbors(queries, supports, q_batches, s_batches, radius):
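Aside (not part of the commit): the realign loops above work because np.sum(np.expand_dims(p, 2) * R, axis=1) is simply the matrix product p @ R written with broadcasting, and because R is a rotation matrix, multiplying afterwards by R.T undoes it. Subsampling therefore happens in a randomly oriented grid for each batch element, and the resulting barycenters are rotated back into the original frame. A small check of the round trip, reusing create_3D_rotations from this repository (see the hunk touching it further below); sizes are illustrative.

import numpy as np

B, N = 4, 100
points = np.random.rand(B * N, 3).astype(np.float32)
batches_len = [N] * B

# Same random rotation recipe as in batch_grid_subsampling
theta = np.random.rand(B) * 2 * np.pi
phi = (np.random.rand(B) - 0.5) * np.pi
u = np.vstack([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
alpha = np.random.rand(B) * 2 * np.pi
R = create_3D_rotations(u.T, alpha).astype(np.float32)

def apply_per_batch(pts, rots):
    out = pts.copy()
    i0 = 0
    for bi, length in enumerate(batches_len):
        out[i0:i0 + length] = np.sum(np.expand_dims(out[i0:i0 + length], 2) * rots[bi], axis=1)
        i0 += length
    return out

rotated = apply_per_batch(points, R)                        # what happens before subsampling
realigned = apply_per_batch(rotated, R.transpose(0, 2, 1))  # realign with R[bi].T, as done after subsampling

print(np.max(np.abs(realigned - points)))                   # small float error only: the rotation is undone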
@@ -499,8 +566,6 @@ class PointCloudDataset(Dataset):
         # Return inputs
         ###############

-        # Save deform layers
-
         # list of network inputs
         li = input_points + input_neighbors + input_pools + input_upsamples + input_stack_lengths
         li += [stacked_features, labels]

@@ -74,6 +74,7 @@ def create_3D_rotations(axis, angle):

     return np.reshape(R, (-1, 3, 3))


 def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximation='monte-carlo',
                     approx_n=5000, max_iter=500, momentum=0.9, verbose=0):
     """
@@ -44,12 +44,8 @@ def p2p_fitting_regularizer(net):
             # Fitting loss
             ##############

-            # Get the distance to closest input point
-            KP_min_d2, _ = torch.min(m.deformed_d2, dim=1)
-
-            # Normalize KP locations to be independant from layers
-            KP_min_d2 = KP_min_d2 / (m.KP_extent ** 2)
+            # Get the distance to closest input point and normalize to be independent from layers
+            KP_min_d2 = m.min_d2 / (m.KP_extent ** 2)

             # Loss will be the square distance to closest input point. We use L1 because dist is already squared
             fitting_loss += net.l1(KP_min_d2, torch.zeros_like(KP_min_d2))
@@ -65,11 +61,11 @@ def p2p_fitting_regularizer(net):
             for i in range(net.K):
                 other_KP = torch.cat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], dim=1).detach()
                 distances = torch.sqrt(torch.sum((other_KP - KP_locs[:, i:i + 1, :]) ** 2, dim=2))
-                rep_loss = torch.sum(torch.clamp_max(distances - 1.0, max=0.0) ** 2, dim=1)
+                rep_loss = torch.sum(torch.clamp_max(distances - net.repulse_extent, max=0.0) ** 2, dim=1)
                 repulsive_loss += net.l1(rep_loss, torch.zeros_like(rep_loss)) / net.K

     # The hook effectively affects both the regularizer and the output loss, so here we have to divide by deform_loss_power
-    return (net.deform_fitting_power / net.deform_loss_power) * (fitting_loss + repulsive_loss)
+    return (net.deform_fitting_power / net.deform_loss_power) * (2 * fitting_loss + repulsive_loss)


 class KPCNN(nn.Module):
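Aside (not part of the commit): taken together, the two hunks above mean the regularizer now reads the squared distances saved by the convolution (min_d2), normalizes them by KP_extent**2, repels kernel points only until they are repulse_extent apart, and gives the fitting term twice the weight of the repulsive term. A self-contained sketch with random tensors standing in for the module attributes, and illustrative values for the extents and powers:

import torch

l1 = torch.nn.L1Loss()
n_points, K = 128, 15
KP_extent, repulse_extent = 1.2, 1.2
fitting_power, loss_power = 1.0, 1.0

min_d2 = torch.rand(n_points, K)                  # stand-in for m.min_d2: squared distance of each deformed KP to its closest input point
KP_locs = torch.rand(n_points, K, 3) / KP_extent  # stand-in for the deformed kernel points, normalized by KP_extent

# Fitting term: pull every kernel point towards the input points
KP_min_d2 = min_d2 / (KP_extent ** 2)
fitting_loss = l1(KP_min_d2, torch.zeros_like(KP_min_d2))

# Repulsive term: push kernel points apart until they are repulse_extent away from each other
repulsive_loss = 0
for i in range(K):
    other_KP = torch.cat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], dim=1).detach()
    distances = torch.sqrt(torch.sum((other_KP - KP_locs[:, i:i + 1, :]) ** 2, dim=2))
    rep_loss = torch.sum(torch.clamp_max(distances - repulse_extent, max=0.0) ** 2, dim=1)
    repulsive_loss += l1(rep_loss, torch.zeros_like(rep_loss)) / K

loss = (fitting_power / loss_power) * (2 * fitting_loss + repulsive_loss)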
@@ -144,6 +140,7 @@ class KPCNN(nn.Module):
         self.deform_fitting_mode = config.deform_fitting_mode
         self.deform_fitting_power = config.deform_fitting_power
         self.deform_loss_power = config.deform_loss_power
+        self.repulse_extent = config.repulse_extent
         self.output_loss = 0
         self.reg_loss = 0
         self.l1 = nn.L1Loss()
@@ -329,6 +326,7 @@ class KPFCNN(nn.Module):
         self.deform_fitting_mode = config.deform_fitting_mode
         self.deform_fitting_power = config.deform_fitting_power
         self.deform_loss_power = config.deform_loss_power
+        self.repulse_extent = config.repulse_extent
         self.output_loss = 0
         self.reg_loss = 0
         self.l1 = nn.L1Loss()
@@ -174,8 +174,10 @@ class KPConv(nn.Module):
         self.deformable = deformable
         self.modulated = modulated

+        self.in_offset_channels = in_channels
+
         # Running variable containing deformed KP distance to input points. (used in regularization loss)
-        self.deformed_d2 = None
+        self.min_d2 = None
         self.deformed_KP = None
         self.offset_features = None

@@ -191,7 +193,7 @@ class KPConv(nn.Module):
             self.offset_dim = self.p_dim * self.K
             self.offset_conv = KPConv(self.K,
                                       self.p_dim,
-                                      in_channels,
+                                      self.in_offset_channels,
                                       self.offset_dim,
                                       KP_extent,
                                       radius,
@@ -241,7 +243,10 @@ class KPConv(nn.Module):
         ###################

         if self.deformable:
-            self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias
+
+            # Get offsets with a KPConv that only takes part of the features
+            x_offsets = x[:, :self.in_offset_channels]
+            self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x_offsets) + self.offset_bias

             if self.modulated:

@@ -295,18 +300,33 @@ class KPConv(nn.Module):
         sq_distances = torch.sum(differences ** 2, dim=3)

         # Optimization by ignoring points outside a deformed KP range
-        if False and self.deformable:
+        if self.deformable:
+
+            # Save distances for loss
+            self.min_d2, _ = torch.min(sq_distances, dim=1)

             # Boolean of the neighbors in range of a kernel point [n_points, n_neighbors]
-            in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2)
+            in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32)

             # New value of max neighbors
             new_max_neighb = torch.max(torch.sum(in_range, dim=1))

-            print(sq_distances.shape[1], '=>', new_max_neighb.item())
+            # For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb]
+            neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1)

-        # Save distances for loss
-        if self.deformable:
-            self.deformed_d2 = sq_distances
+            # Gather new neighbor indices [n_points, new_max_neighb]
+            new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False)
+
+            # Gather new distances to KP [n_points, new_max_neighb, n_kpoints]
+            neighb_row_inds.unsqueeze_(2)
+            neighb_row_inds = neighb_row_inds.expand(-1, -1, self.K)
+            sq_distances = sq_distances.gather(1, neighb_row_inds, sparse_grad=False)
+
+            # New shadow neighbors have to point to the last shadow point
+            new_neighb_inds *= neighb_row_bool
+            new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1)
+        else:
+            new_neighb_inds = neighb_inds

         # Get Kernel point influences [n_points, n_kpoints, n_neighbors]
         if self.KP_influence == 'constant':
@@ -339,7 +359,7 @@ class KPConv(nn.Module):
         x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)

         # Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
-        neighb_x = gather(x, neighb_inds)
+        neighb_x = gather(x, new_neighb_inds)

         # Apply distance weights [n_points, n_kpoints, in_fdim]
         weighted_features = torch.matmul(all_weights, neighb_x)
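Aside (not part of the commit): the deformable branch above can be read as four steps: build a 0/1 mask of neighbors lying within KP_extent of at least one deformed kernel point, compact every row of that mask with topk (whose indices say where the kept entries came from), gather the corresponding neighbor indices and squared distances, and finally redirect the padded slots to the shadow point so that the gather(x, new_neighb_inds) call above picks up zero features for them. A toy sketch of the index manipulation, with illustrative shapes:

import torch

n_points, max_neighb, n_support = 5, 8, 20
neighb_inds = torch.randint(0, n_support + 1, (n_points, max_neighb))  # n_support is the shadow index
in_range = (torch.rand(n_points, max_neighb) < 0.4).type(torch.int32)  # 1 where the neighbor is kept

# Smallest number of columns that still holds every kept neighbor
new_max_neighb = torch.max(torch.sum(in_range, dim=1))

# topk on the 0/1 mask pushes the kept entries to the front of each row
neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1)

# Gather the corresponding neighbor indices, then point the padded slots to the shadow index
new_neighb_inds = neighb_inds.gather(1, neighb_row_inds)
new_neighb_inds *= neighb_row_bool
new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * n_support

print(neighb_inds.shape, '=>', new_neighb_inds.shape)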
@@ -80,32 +80,6 @@ def running_mean(signal, n, axis=0, stride=1):
     return None


-def IoU_multi_metrics(all_IoUs, smooth_n):
-
-    # Get mean IoU for consecutive epochs to directly get a mean
-    all_mIoUs = [np.hstack([np.mean(obj_IoUs, axis=1) for obj_IoUs in epoch_IoUs]) for epoch_IoUs in all_IoUs]
-    smoothed_mIoUs = []
-    for epoch in range(len(all_mIoUs)):
-        i0 = max(epoch - smooth_n, 0)
-        i1 = min(epoch + smooth_n + 1, len(all_mIoUs))
-        smoothed_mIoUs += [np.mean(np.hstack(all_mIoUs[i0:i1]))]
-
-    # Get mean for each class
-    all_objs_mIoUs = [[np.mean(obj_IoUs, axis=1) for obj_IoUs in epoch_IoUs] for epoch_IoUs in all_IoUs]
-    smoothed_obj_mIoUs = []
-    for epoch in range(len(all_objs_mIoUs)):
-        i0 = max(epoch - smooth_n, 0)
-        i1 = min(epoch + smooth_n + 1, len(all_objs_mIoUs))
-
-        epoch_obj_mIoUs = []
-        for obj in range(len(all_objs_mIoUs[0])):
-            epoch_obj_mIoUs += [np.mean(np.hstack([objs_mIoUs[obj] for objs_mIoUs in all_objs_mIoUs[i0:i1]]))]
-
-        smoothed_obj_mIoUs += [epoch_obj_mIoUs]
-
-    return np.array(smoothed_mIoUs), np.array(smoothed_obj_mIoUs)
-
-
 def IoU_class_metrics(all_IoUs, smooth_n):

     # Get mean IoU per class for consecutive epochs to directly get a mean without further smoothing
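Aside (not part of the commit): the removed IoU_multi_metrics and the remaining IoU_class_metrics smooth per-epoch scores the same way, by averaging each epoch with its smooth_n neighbors on either side. A stripped-down sketch with random values:

import numpy as np

def smooth_scores(per_epoch_scores, smooth_n):
    smoothed = []
    for epoch in range(len(per_epoch_scores)):
        i0 = max(epoch - smooth_n, 0)
        i1 = min(epoch + smooth_n + 1, len(per_epoch_scores))
        smoothed += [np.mean(per_epoch_scores[i0:i1])]
    return np.array(smoothed)

mIoUs = np.random.rand(50)                 # one validation mIoU per epoch
print(smooth_scores(mIoUs, smooth_n=5)[:5])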
@ -215,66 +189,11 @@ def load_snap_clouds(path, dataset, only_last=False):
|
||||||
return cloud_epochs, IoU_from_confusions(Confs)
|
return cloud_epochs, IoU_from_confusions(Confs)
|
||||||
|
|
||||||
|
|
||||||
def load_multi_snap_clouds(path, dataset, file_i, only_last=False):
|
# ----------------------------------------------------------------------------------------------------------------------
|
||||||
|
#
|
||||||
cloud_folders = np.array([join(path, f) for f in listdir(path) if f.startswith('val_preds')])
|
# Plot functions
|
||||||
cloud_epochs = np.array([int(f.split('_')[-1]) for f in cloud_folders])
|
# \********************/
|
||||||
epoch_order = np.argsort(cloud_epochs)
|
#
|
||||||
cloud_epochs = cloud_epochs[epoch_order]
|
|
||||||
cloud_folders = cloud_folders[epoch_order]
|
|
||||||
|
|
||||||
if len(cloud_folders) > 0:
|
|
||||||
dataset_folders = [f for f in listdir(cloud_folders[0]) if dataset.name in f]
|
|
||||||
cloud_folders = [join(f, dataset_folders[file_i]) for f in cloud_folders]
|
|
||||||
|
|
||||||
Confs = np.zeros((len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32)
|
|
||||||
for c_i, cloud_folder in enumerate(cloud_folders):
|
|
||||||
if only_last and c_i < len(cloud_epochs) - 1:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Load confusion if previously saved
|
|
||||||
conf_file = join(cloud_folder, 'conf_{:s}.txt'.format(dataset.name))
|
|
||||||
if isfile(conf_file):
|
|
||||||
Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32)
|
|
||||||
|
|
||||||
else:
|
|
||||||
for f in listdir(cloud_folder):
|
|
||||||
if f.endswith('.ply') and not f.endswith('sub.ply'):
|
|
||||||
if np.any([cloud_path.endswith(f) for cloud_path in dataset.files]):
|
|
||||||
data = read_ply(join(cloud_folder, f))
|
|
||||||
labels = data['class']
|
|
||||||
preds = data['preds']
|
|
||||||
Confs[c_i] += confusion_matrix(labels, preds, dataset.label_values).astype(np.int32)
|
|
||||||
|
|
||||||
np.savetxt(conf_file, Confs[c_i], '%12d')
|
|
||||||
|
|
||||||
# Erase ply to save disk memory
|
|
||||||
if c_i < len(cloud_folders) - 1:
|
|
||||||
for f in listdir(cloud_folder):
|
|
||||||
if f.endswith('.ply'):
|
|
||||||
remove(join(cloud_folder, f))
|
|
||||||
|
|
||||||
# Remove ignored labels from confusions
|
|
||||||
for l_ind, label_value in reversed(list(enumerate(dataset.label_values))):
|
|
||||||
if label_value in dataset.ignored_labels:
|
|
||||||
Confs = np.delete(Confs, l_ind, axis=1)
|
|
||||||
Confs = np.delete(Confs, l_ind, axis=2)
|
|
||||||
|
|
||||||
return cloud_epochs, IoU_from_confusions(Confs)
|
|
||||||
|
|
||||||
|
|
||||||
def load_multi_IoU(filename, n_parts):
|
|
||||||
|
|
||||||
with open(filename, 'r') as f:
|
|
||||||
lines = f.readlines()
|
|
||||||
|
|
||||||
# Load all IoUs
|
|
||||||
all_IoUs = []
|
|
||||||
for i, line in enumerate(lines):
|
|
||||||
obj_IoUs = [[float(IoU) for IoU in s.split()] for s in line.split('/')]
|
|
||||||
obj_IoUs = [np.reshape(IoUs, [-1, n_parts[obj]]) for obj, IoUs in enumerate(obj_IoUs)]
|
|
||||||
all_IoUs += [obj_IoUs]
|
|
||||||
return all_IoUs
|
|
||||||
|
|
||||||
|
|
||||||
def compare_trainings(list_of_paths, list_of_labels=None):
|
def compare_trainings(list_of_paths, list_of_labels=None):
|
||||||
|
@ -410,149 +329,6 @@ def compare_trainings(list_of_paths, list_of_labels=None):
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
|
|
||||||
def compare_convergences_multisegment(list_of_paths, list_of_labels=None):
|
|
||||||
|
|
||||||
# Parameters
|
|
||||||
# **********
|
|
||||||
|
|
||||||
steps_per_epoch = 0
|
|
||||||
smooth_n = 5
|
|
||||||
|
|
||||||
if list_of_labels is None:
|
|
||||||
list_of_labels = [str(i) for i in range(len(list_of_paths))]
|
|
||||||
|
|
||||||
# Read Logs
|
|
||||||
# *********
|
|
||||||
|
|
||||||
all_pred_epochs = []
|
|
||||||
all_instances_mIoUs = []
|
|
||||||
all_objs_mIoUs = []
|
|
||||||
all_objs_IoUs = []
|
|
||||||
all_parts = []
|
|
||||||
|
|
||||||
obj_list = ['Air', 'Bag', 'Cap', 'Car', 'Cha', 'Ear', 'Gui', 'Kni',
|
|
||||||
'Lam', 'Lap', 'Mot', 'Mug', 'Pis', 'Roc', 'Ska', 'Tab']
|
|
||||||
print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')
|
|
||||||
print('-----|------|--------------------------------------------------------------------------------')
|
|
||||||
for path in list_of_paths:
|
|
||||||
|
|
||||||
# Load parameters
|
|
||||||
config = Config()
|
|
||||||
config.load(path)
|
|
||||||
|
|
||||||
# Get the number of classes
|
|
||||||
n_parts = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
|
|
||||||
part = config.dataset.split('_')[-1]
|
|
||||||
|
|
||||||
# Get validation confusions
|
|
||||||
file = join(path, 'val_IoUs.txt')
|
|
||||||
val_IoUs = load_multi_IoU(file, n_parts)
|
|
||||||
|
|
||||||
file = join(path, 'vote_IoUs.txt')
|
|
||||||
vote_IoUs = load_multi_IoU(file, n_parts)
|
|
||||||
|
|
||||||
#print(len(val_IoUs[0]))
|
|
||||||
#print(val_IoUs[0][0].shape)
|
|
||||||
|
|
||||||
# Get mean IoU
|
|
||||||
#instances_mIoUs, objs_mIoUs = IoU_multi_metrics(val_IoUs, smooth_n)
|
|
||||||
|
|
||||||
# Get mean IoU
|
|
||||||
instances_mIoUs, objs_mIoUs = IoU_multi_metrics(vote_IoUs, smooth_n)
|
|
||||||
|
|
||||||
# Aggregate results
|
|
||||||
all_pred_epochs += [np.array([i for i in range(len(val_IoUs))])]
|
|
||||||
all_instances_mIoUs += [instances_mIoUs]
|
|
||||||
all_objs_IoUs += [objs_mIoUs]
|
|
||||||
all_objs_mIoUs += [np.mean(objs_mIoUs, axis=1)]
|
|
||||||
|
|
||||||
if part == 'multi':
|
|
||||||
s = '{:4.1f} | {:4.1f} | '.format(100 * np.mean(objs_mIoUs[-1]), 100 * instances_mIoUs[-1])
|
|
||||||
for obj_mIoU in objs_mIoUs[-1]:
|
|
||||||
s += '{:4.1f} '.format(100 * obj_mIoU)
|
|
||||||
print(s)
|
|
||||||
else:
|
|
||||||
s = ' -- | -- | '
|
|
||||||
for obj_name in obj_list:
|
|
||||||
if part.startswith(obj_name):
|
|
||||||
s += '{:4.1f} '.format(100 * instances_mIoUs[-1])
|
|
||||||
else:
|
|
||||||
s += ' -- '.format(100 * instances_mIoUs[-1])
|
|
||||||
print(s)
|
|
||||||
all_parts += [part]
|
|
||||||
|
|
||||||
# Plots
|
|
||||||
# *****
|
|
||||||
|
|
||||||
if 'multi' in all_parts:
|
|
||||||
|
|
||||||
# Figure
|
|
||||||
fig = plt.figure('Instances mIoU')
|
|
||||||
for i, label in enumerate(list_of_labels):
|
|
||||||
if all_parts[i] == 'multi':
|
|
||||||
plt.plot(all_pred_epochs[i], all_instances_mIoUs[i], linewidth=1, label=label)
|
|
||||||
plt.xlabel('epochs')
|
|
||||||
plt.ylabel('IoU')
|
|
||||||
|
|
||||||
# Set limits for y axis
|
|
||||||
#plt.ylim(0.55, 0.95)
|
|
||||||
|
|
||||||
# Display legends and title
|
|
||||||
plt.legend(loc=4)
|
|
||||||
|
|
||||||
# Customize the graph
|
|
||||||
ax = fig.gca()
|
|
||||||
ax.grid(linestyle='-.', which='both')
|
|
||||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
|
||||||
|
|
||||||
# Figure
|
|
||||||
fig = plt.figure('mean of categories mIoU')
|
|
||||||
for i, label in enumerate(list_of_labels):
|
|
||||||
if all_parts[i] == 'multi':
|
|
||||||
plt.plot(all_pred_epochs[i], all_objs_mIoUs[i], linewidth=1, label=label)
|
|
||||||
plt.xlabel('epochs')
|
|
||||||
plt.ylabel('IoU')
|
|
||||||
|
|
||||||
# Set limits for y axis
|
|
||||||
#plt.ylim(0.8, 1)
|
|
||||||
|
|
||||||
# Display legends and title
|
|
||||||
plt.legend(loc=4)
|
|
||||||
|
|
||||||
# Customize the graph
|
|
||||||
ax = fig.gca()
|
|
||||||
ax.grid(linestyle='-.', which='both')
|
|
||||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
|
||||||
|
|
||||||
for obj_i, obj_name in enumerate(obj_list):
|
|
||||||
if np.any([part.startswith(obj_name) for part in all_parts]):
|
|
||||||
# Figure
|
|
||||||
fig = plt.figure(obj_name + ' mIoU')
|
|
||||||
for i, label in enumerate(list_of_labels):
|
|
||||||
if all_parts[i] == 'multi':
|
|
||||||
plt.plot(all_pred_epochs[i], all_objs_IoUs[i][:, obj_i], linewidth=1, label=label)
|
|
||||||
elif all_parts[i].startswith(obj_name):
|
|
||||||
plt.plot(all_pred_epochs[i], all_objs_mIoUs[i], linewidth=1, label=label)
|
|
||||||
plt.xlabel('epochs')
|
|
||||||
plt.ylabel('IoU')
|
|
||||||
|
|
||||||
# Set limits for y axis
|
|
||||||
#plt.ylim(0.8, 1)
|
|
||||||
|
|
||||||
# Display legends and title
|
|
||||||
plt.legend(loc=4)
|
|
||||||
|
|
||||||
# Customize the graph
|
|
||||||
ax = fig.gca()
|
|
||||||
ax.grid(linestyle='-.', which='both')
|
|
||||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Show all
|
|
||||||
plt.show()
|
|
||||||
|
|
||||||
|
|
||||||
def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
|
def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
|
||||||
|
|
||||||
# Parameters
|
# Parameters
|
||||||
|
@ -784,168 +560,6 @@ def compare_convergences_classif(list_of_paths, list_of_labels=None):
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
|
|
||||||
def compare_convergences_multicloud(list_of_paths, multi, multi_datasets, list_of_names=None):
|
|
||||||
|
|
||||||
# Parameters
|
|
||||||
# **********
|
|
||||||
|
|
||||||
smooth_n = 10
|
|
||||||
|
|
||||||
if list_of_names is None:
|
|
||||||
list_of_names = [str(i) for i in range(len(list_of_paths))]
|
|
||||||
|
|
||||||
|
|
||||||
# Loop on all datasets:
|
|
||||||
for plot_dataset in multi_datasets:
|
|
||||||
print('\n')
|
|
||||||
print(plot_dataset)
|
|
||||||
print('*'*len(plot_dataset))
|
|
||||||
print()
|
|
||||||
|
|
||||||
# Load dataset parameters
|
|
||||||
if plot_dataset.startswith('S3DIS'):
|
|
||||||
dataset = S3DISDataset()
|
|
||||||
elif plot_dataset.startswith('Scann'):
|
|
||||||
dataset = ScannetDataset()
|
|
||||||
elif plot_dataset.startswith('Seman'):
|
|
||||||
dataset = Semantic3DDataset()
|
|
||||||
elif plot_dataset.startswith('NPM3D'):
|
|
||||||
dataset = NPM3DDataset()
|
|
||||||
else:
|
|
||||||
raise ValueError('Unsupported dataset : ' + plot_dataset)
|
|
||||||
|
|
||||||
# Read Logs
|
|
||||||
# *********
|
|
||||||
|
|
||||||
all_pred_epochs = []
|
|
||||||
all_mIoUs = []
|
|
||||||
all_class_IoUs = []
|
|
||||||
all_snap_epochs = []
|
|
||||||
all_snap_IoUs = []
|
|
||||||
all_names = []
|
|
||||||
|
|
||||||
class_list = [dataset.label_to_names[label] for label in dataset.label_values
|
|
||||||
if label not in dataset.ignored_labels]
|
|
||||||
|
|
||||||
s = '{:^10}|'.format('mean')
|
|
||||||
for c in class_list:
|
|
||||||
s += '{:^10}'.format(c)
|
|
||||||
print(s)
|
|
||||||
print(10*'-' + '|' + 10*dataset.num_classes*'-')
|
|
||||||
for log_i, (path, is_multi) in enumerate(zip(list_of_paths, multi)):
|
|
||||||
|
|
||||||
n_c = None
|
|
||||||
if is_multi:
|
|
||||||
config = MultiConfig()
|
|
||||||
config.load(path)
|
|
||||||
if plot_dataset in config.datasets:
|
|
||||||
val_IoU_files = []
|
|
||||||
for d_i in np.where(np.array(config.datasets) == plot_dataset)[0]:
|
|
||||||
n_c = config.num_classes[d_i]
|
|
||||||
val_IoU_files.append(join(path, 'val_IoUs_{:d}_{:s}.txt'.format(d_i, plot_dataset)))
|
|
||||||
else:
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
config = Config()
|
|
||||||
config.load(path)
|
|
||||||
if plot_dataset == config.dataset:
|
|
||||||
n_c = config.num_classes
|
|
||||||
val_IoU_files = [join(path, 'val_IoUs.txt')]
|
|
||||||
else:
|
|
||||||
continue
|
|
||||||
|
|
||||||
for file_i, file in enumerate(val_IoU_files):
|
|
||||||
|
|
||||||
# Load validation IoUs
|
|
||||||
val_IoUs = load_single_IoU(file, n_c)
|
|
||||||
|
|
||||||
# Get mean IoU
|
|
||||||
class_IoUs, mIoUs = IoU_class_metrics(val_IoUs, smooth_n)
|
|
||||||
|
|
||||||
# Aggregate results
|
|
||||||
all_pred_epochs += [np.array([i for i in range(len(val_IoUs))])]
|
|
||||||
all_mIoUs += [mIoUs]
|
|
||||||
all_class_IoUs += [class_IoUs]
|
|
||||||
all_names += [list_of_names[log_i]+'_{:d}'.format(file_i+1)]
|
|
||||||
|
|
||||||
s = '{:^10.1f}|'.format(100*mIoUs[-1])
|
|
||||||
for IoU in class_IoUs[-1]:
|
|
||||||
s += '{:^10.1f}'.format(100*IoU)
|
|
||||||
print(s)
|
|
||||||
|
|
||||||
# Get optional full validation on clouds
|
|
||||||
if is_multi:
|
|
||||||
snap_epochs, snap_IoUs = load_multi_snap_clouds(path, dataset, file_i)
|
|
||||||
else:
|
|
||||||
snap_epochs, snap_IoUs = load_snap_clouds(path, dataset)
|
|
||||||
all_snap_epochs += [snap_epochs]
|
|
||||||
all_snap_IoUs += [snap_IoUs]
|
|
||||||
|
|
||||||
print(10*'-' + '|' + 10*dataset.num_classes*'-')
|
|
||||||
for snap_IoUs in all_snap_IoUs:
|
|
||||||
if len(snap_IoUs) > 0:
|
|
||||||
s = '{:^10.1f}|'.format(100*np.mean(snap_IoUs[-1]))
|
|
||||||
for IoU in snap_IoUs[-1]:
|
|
||||||
s += '{:^10.1f}'.format(100*IoU)
|
|
||||||
else:
|
|
||||||
s = '{:^10s}'.format('-')
|
|
||||||
for _ in range(dataset.num_classes):
|
|
||||||
s += '{:^10s}'.format('-')
|
|
||||||
print(s)
|
|
||||||
|
|
||||||
# Plots
|
|
||||||
# *****
|
|
||||||
|
|
||||||
# Figure
|
|
||||||
fig = plt.figure('mIoUs')
|
|
||||||
for i, name in enumerate(all_names):
|
|
||||||
p = plt.plot(all_pred_epochs[i], all_mIoUs[i], '--', linewidth=1, label=name)
|
|
||||||
plt.plot(all_snap_epochs[i], np.mean(all_snap_IoUs[i], axis=1), linewidth=1, color=p[-1].get_color())
|
|
||||||
|
|
||||||
plt.title(plot_dataset)
|
|
||||||
plt.xlabel('epochs')
|
|
||||||
plt.ylabel('IoU')
|
|
||||||
|
|
||||||
# Set limits for y axis
|
|
||||||
#plt.ylim(0.55, 0.95)
|
|
||||||
|
|
||||||
# Display legends and title
|
|
||||||
plt.legend(loc=4)
|
|
||||||
|
|
||||||
# Customize the graph
|
|
||||||
ax = fig.gca()
|
|
||||||
ax.grid(linestyle='-.', which='both')
|
|
||||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
|
||||||
|
|
||||||
displayed_classes = [0, 1, 2, 3, 4, 5, 6, 7]
|
|
||||||
displayed_classes = []
|
|
||||||
for c_i, c_name in enumerate(class_list):
|
|
||||||
if c_i in displayed_classes:
|
|
||||||
|
|
||||||
# Figure
|
|
||||||
fig = plt.figure(c_name + ' IoU')
|
|
||||||
for i, name in enumerate(list_of_names):
|
|
||||||
plt.plot(all_pred_epochs[i], all_class_IoUs[i][:, c_i], linewidth=1, label=name)
|
|
||||||
plt.xlabel('epochs')
|
|
||||||
plt.ylabel('IoU')
|
|
||||||
|
|
||||||
# Set limits for y axis
|
|
||||||
#plt.ylim(0.8, 1)
|
|
||||||
|
|
||||||
# Display legends and title
|
|
||||||
plt.legend(loc=4)
|
|
||||||
|
|
||||||
# Customize the graph
|
|
||||||
ax = fig.gca()
|
|
||||||
ax.grid(linestyle='-.', which='both')
|
|
||||||
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Show all
|
|
||||||
plt.show()
|
|
||||||
|
|
||||||
|
|
||||||
def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
|
def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
|
||||||
|
|
||||||
# Parameters
|
# Parameters
|
||||||
|
@ -1069,491 +683,79 @@ def compare_convergences_SLAM(dataset, list_of_paths, list_of_names=None):
|
||||||
|
|
||||||
# ----------------------------------------------------------------------------------------------------------------------
|
# ----------------------------------------------------------------------------------------------------------------------
|
||||||
#
|
#
|
||||||
# Main Call
|
# Experiments
|
||||||
# \***************/
|
# \*****************/
|
||||||
#
|
#
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_first_test():
|
def experiment_name_1():
|
||||||
"""
|
"""
|
||||||
First tries with ModelNet40
|
In this function you choose the results you want to plot together, to compare them as an experiment.
|
||||||
First we compare convergence of a very, very deep network on ModelNet40, with or without bn.
|
Just return the list of log paths (like 'results/Log_2020-04-04_10-04-42' for example), and the associated names
|
||||||
Then we try resuming previous trainings, which works quite well.
|
of these logs.
|
||||||
However, in the meantime, we changed how validation works by calling net.eval()/net.train() before/after
|
Below an example of how to automatically gather all logs between two dates, and name them.
|
||||||
validation. It seems that the network performs strangely when calling net.eval()/net.train(), although it should be the
|
|
||||||
right way to do it.
|
|
||||||
Then we try to change BatchNorm1D with InstanceNorm1D and compare with and without calling eval/train at validation.
|
|
||||||
(Also with a faster lr decay).
|
|
||||||
--- MISTAKE FOUND --- the batch norm momentum was inverted 0.98 instead of 0.02.
|
|
||||||
See the next experiment for correct convergences. Instance norm does not seem as good.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-18_16-04-20'
|
|
||||||
end = 'Log_2020-03-20_16-59-40'
|
|
||||||
|
|
||||||
if end < 'Log_2020-03-22_19-30-19':
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['with_bn',
|
|
||||||
'without_bn',
|
|
||||||
'with_bn2',
|
|
||||||
'without_bn2',
|
|
||||||
'lrd_80_Inorm_eval_train',
|
|
||||||
'lrd_80_Inorm_always_train',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_batch_norm():
|
|
||||||
"""
|
|
||||||
Compare different types of batch norm now that it has been fixed. Batch norm easily seems the best. Instance norm
created a NaN loss, so avoid this one.
Now try fast experiments. First reduce the network size. Reducing the number of convolutions per layer does not affect
results (maybe because the dataset is too simple?). 5 small layers are way better than 4 big layers.
Now reduce the number of steps per epoch and maybe try a balanced sampler. A balanced sampler with fewer steps per epoch is
way faster for convergence and gets nearly the same scores, so it is good for experimenting. However, we cannot really
conclude between parameters that get the same score (like more layers), because the dataset may be
limiting. We can only conclude when something is not good and reduces the score.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-20_16-59-41'
|
|
||||||
end = 'Log_2020-04-13_18-14-44'
|
|
||||||
|
|
||||||
if end < 'Log_2020-03-22_19-30-19':
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['no_norm',
|
|
||||||
'IN',
|
|
||||||
'BN',
|
|
||||||
'5_small_layer-d0=0.02',
|
|
||||||
'3_big_layer-d0=0.02',
|
|
||||||
'3_big_layer-d0=0.04',
|
|
||||||
'small-e_n=300',
|
|
||||||
'small-e_n=300-balanced_train',
|
|
||||||
'small-e_n=300-balanced_traintest',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_fast_vs_results():
|
|
||||||
"""
|
|
||||||
Try lr decay with fast convergence (epoch_n=300 and balanced traintest). 80 is a good value.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-21_16-09-17'
|
|
||||||
end = 'Log_2020-03-21_16-09-36'
|
|
||||||
|
|
||||||
if end < 'Log_2020-03-22_19-30-19':
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = np.insert(logs, 1, join(res_path, 'Log_2020-03-21_11-57-45'))
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['lrd=120',
|
|
||||||
'lrd=80',
|
|
||||||
'lrd=40',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_grad_clipping():
|
|
||||||
"""
|
|
||||||
Test different grad clipping settings. No difference, so we can move on.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-21_18-21-37'
|
|
||||||
end = 'Log_2020-03-21_18-30-01'
|
|
||||||
|
|
||||||
if end < 'Log_2020-03-22_19-30-19':
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = np.insert(logs, 0, join(res_path, 'Log_2020-03-21_11-57-45'))
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['value_clip_100',
|
|
||||||
'norm_clip_100',
|
|
||||||
'value_clip_10',
|
|
||||||
'norm_clip_10',
|
|
||||||
'no_clip',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_KP_extent():
|
|
||||||
"""
|
|
||||||
Test different modes and KP extents. sum with extent=2.0 is definitely worse (too much overlap of the KP
influences, so the kernel is less versatile). closest seems rather good, and sum with extent=1.5 is quite good, maybe
the best. To be confirmed on a large dataset.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-21_18-30-02'
|
|
||||||
end = 'Log_2020-03-21_23-36-18'
|
|
||||||
|
|
||||||
if end < 'Log_2020-03-22_19-30-19':
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = np.insert(logs, 0, join(res_path, 'Log_2020-03-21_11-57-45'))
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['KPe=1.0_sum_linear',
|
|
||||||
'KPe=1.5_sum_linear',
|
|
||||||
'KPe=2.0_sum_linear',
|
|
||||||
'KPe=1.5_closest_linear',
|
|
||||||
'KPe=2.0_closest_linear',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_gaussian():
|
|
||||||
"""
|
|
||||||
Test different extent in gaussian mode. extent=1.5 seems the best. 2.0 is not bad. But in any case, it does not
|
|
||||||
perform better than 1.5-linear-sum at least on this dataset.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-21_23-36-19'
|
|
||||||
end = 'Log_2020-04-13_18-14-44'
|
|
||||||
|
|
||||||
if end < 'Log_2020-03-22_19-30-19':
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = np.insert(logs, 4, join(res_path, 'Log_2020-03-21_19-35-11'))
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['KPe=1.0_sum_gaussian',
|
|
||||||
'KPe=1.5_sum_gaussian',
|
|
||||||
'KPe=2.0_sum_gaussian',
|
|
||||||
'KPe=2.5_sum_gaussian',
|
|
||||||
'KPe=1.5_sum_linear',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_normals():
|
|
||||||
"""
|
|
||||||
Test different ways to add normals. Results seem pretty much the same, and we do not care about normals.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-22_10-18-56'
|
|
||||||
end = 'Log_2020-03-22_13-32-51'
|
|
||||||
|
|
||||||
if end < 'Log_2020-03-22_19-30-19':
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = np.insert(logs, 0, join(res_path, 'Log_2020-03-21_19-35-11'))
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['no_normals',
|
|
||||||
'anisotropic_scale_normals',
|
|
||||||
'wrong_scale_normals',
|
|
||||||
'only_symmetries_normals(cheating)',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_radius():
|
|
||||||
"""
|
|
||||||
Test different convolution radii. It was expected that a larger radius would mean slower networks but better
performance. In fact we do not see much difference (again maybe because of the dataset?).
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-22_13-32-52'
|
|
||||||
end = 'Log_2020-03-22_19-30-17'
|
|
||||||
|
|
||||||
if end < 'Log_2020-03-22_19-30-19':
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = np.insert(logs, 2, join(res_path, 'Log_2020-03-21_19-35-11'))
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['KPe=0.9_r=1.5',
|
|
||||||
'KPe=1.2_r=2.0',
|
|
||||||
'KPe=1.5_r=2.5',
|
|
||||||
'KPe=1.8_r=3.0',
|
|
||||||
'KPe=2.1_r=3.5',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def ModelNet40_deform(old_result_limit):
|
|
||||||
"""
|
|
||||||
Test deformable convolution with different offset decay. Without modulations 0.01 seems the best. With
modulations 0.1 seems the best. In all cases 1.0 is too much. We need to show deformations for verification.

It seems that deformations are not really fitting the point cloud. They just reach further away. We need to try on
other datasets, and with deformations earlier, to see if the fitting loss works.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-22_19-30-21'
|
|
||||||
end = 'Log_2020-03-25_19-30-17'
|
|
||||||
|
|
||||||
if end < old_result_limit:
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = logs.astype('<U50')
|
|
||||||
logs = np.insert(logs, 0, join('old_results', 'Log_2020-03-21_19-35-11'))
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['normal',
|
|
||||||
'offset_d=0.01',
|
|
||||||
'offset_d=0.1',
|
|
||||||
'offset_d=1.0',
|
|
||||||
'offset_d=0.001',
|
|
||||||
'offset_d=0.001_modu',
|
|
||||||
'offset_d=0.01_modu',
|
|
||||||
'offset_d=0.1_modu',
|
|
||||||
'offset_d=1.0_modu',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def S3DIS_first(old_result_limit):
|
|
||||||
"""
|
|
||||||
First tests on S3DIS. The first two tests have all symmetries (even vertical), which is not good. We correct this for
the following ones.
Then we try some experiments with different input scales and the results are not as high as expected.
WHY?
FOUND IT! Problem: the resnet bottleneck should divide out_dim by 4 and not by 2.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-03-25_19-30-17'
|
|
||||||
end = 'Log_2020-04-03_11-12-05'
|
|
||||||
|
|
||||||
if end < old_result_limit:
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = logs.astype('<U50')
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['Fin=1_R=1.2_r=0.02 (error all symetries)',
|
|
||||||
'Fin=1_R=2.5_r=0.04 (error all symetries)',
|
|
||||||
'Fin=5_R=1.2_r=0.02',
|
|
||||||
'Fin=5_R=1.8_r=0.03',
|
|
||||||
'Fin=5_R=2.5_r=0.04',
|
|
||||||
'original_normal',
|
|
||||||
'original_deform',
|
|
||||||
'original_random_sampler',
|
|
||||||
'original_potentials_batch16',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def S3DIS_go(old_result_limit):
|
|
||||||
"""
|
|
||||||
Test S3DIS.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-04-03_11-12-07'
|
|
||||||
end = 'Log_2020-04-07_15-30-17'
|
|
||||||
|
|
||||||
if end < old_result_limit:
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = logs.astype('<U50')
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['R=2.0_r=0.04_Din=128_potential',
|
|
||||||
'R=2.0_r=0.04_Din=64_potential',
|
|
||||||
'R=1.8_r=0.03',
|
|
||||||
'R=1.8_r=0.03_deeper',
|
|
||||||
'R=1.8_r=0.03_deform',
|
|
||||||
'R=2.0_r=0.03_megadeep',
|
|
||||||
'R=2.5_r=0.03_megadeep',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def SemanticKittiFirst(old_result_limit):
|
|
||||||
"""
|
|
||||||
Test SemanticKitti. First experiments.
Try some class weight strategies. It seems that the final score is not impacted much. With weights, some classes
are better while others are worse, for a final score that remains the same.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-04-07_15-30-17'
|
|
||||||
end = 'Log_2020-04-11_21-34-16'
|
|
||||||
|
|
||||||
if end < old_result_limit:
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = logs.astype('<U50')
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['R=5.0_dl=0.04',
|
|
||||||
'R=5.0_dl=0.08',
|
|
||||||
'R=10.0_dl=0.08',
|
|
||||||
'R=10.0_dl=0.08_20*weigths',
|
|
||||||
'R=10.0_dl=0.08_20*sqrt_weigths',
|
|
||||||
'R=10.0_dl=0.08_100*sqrt_w',
|
|
||||||
'R=10.0_dl=0.08_100*sqrt_w_capped',
|
|
||||||
'R=10.0_dl=0.08_no_w']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def SemanticKitti_scale(old_result_limit):
|
|
||||||
"""
|
|
||||||
Test SemanticKitti. Try different scales of input radius / subsampling.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
|
|
||||||
start = 'Log_2020-04-11_21-34-15'
|
|
||||||
end = 'Log_2020-04-20_11-52-58'
|
|
||||||
|
|
||||||
if end < old_result_limit:
|
|
||||||
res_path = 'old_results'
|
|
||||||
else:
|
|
||||||
res_path = 'results'
|
|
||||||
|
|
||||||
logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
|
|
||||||
logs = logs.astype('<U50')
|
|
||||||
|
|
||||||
# Give names to the logs (for legends)
|
|
||||||
logs_names = ['R=10.0_dl=0.08',
|
|
||||||
'R=4.0_dl=0.04',
|
|
||||||
'R=6.0_dl=0.06',
|
|
||||||
'R=6.0_dl=0.06_inF=2',
|
|
||||||
'test',
|
|
||||||
'test',
|
|
||||||
'test',
|
|
||||||
'test',
|
|
||||||
'test']
|
|
||||||
|
|
||||||
logs_names = np.array(logs_names[:len(logs)])
|
|
||||||
|
|
||||||
return logs, logs_names
|
|
||||||
|
|
||||||
|
|
||||||
def S3DIS_deform(old_result_limit):
    """
    Debug S3DIS deformable.
    At checkpoint 50, the points seem to start fitting the shape, but then they just get further away from each other
    and do not care about input points. The fitting loss seems broken?

    10* fitting loss seems pretty good at fitting the point cloud. It seems that the offset decay was a bit too low,
    because the same happens without the 0.1 hook. So we can try to keep a 0.5 hook and multiply offset decay by 2.
    """

    # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
    start = 'Log_2020-04-22_11-52-58'
    end = 'Log_2020-05-22_11-52-58'

    if end < old_result_limit:
        res_path = 'old_results'
    else:
        res_path = 'results'

    logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
    logs = logs.astype('<U50')
    logs = np.insert(logs, 0, 'results/Log_2020-04-04_10-04-42')

    # Give names to the logs (for legends)
    logs_names = ['off_d=0.01_baseline',
                  'off_d=0.01',
                  'off_d=0.05',
                  'off_d=0.05_corrected',
                  'off_d=0.05_norepulsive',
                  'off_d=0.05_repulsive0.5',
                  'off_d=0.05_10*fitting',
                  'off_d=0.05_no_hook0.1',
                  'NEWPARAMS_fit=0.05_loss=0.5_(=off_d=0.1_hook0.5)',
                  'same_normal',
                  'test']

    logs_names = np.array(logs_names[:len(logs)])

    return logs, logs_names

    """

    # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
    start = 'Log_2020-04-22_11-52-58'
    end = 'Log_2020-05-22_11-52-58'

    # Name of the result path
    res_path = 'results'

    # Gather logs and sort by date
    logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])

    # Give names to the logs (for plot legends)
    logs_names = ['name_log_1',
                  'name_log_2',
                  'name_log_3']

    # safe check log names
    logs_names = np.array(logs_names[:len(logs)])

    return logs, logs_names


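A remark that applies to the gathering functions above and below: the filter start <= l <= end compares plain strings, which works because log folders are named 'Log_YYYY-MM-DD_HH-MM-SS', so lexicographic order matches chronological order. A quick illustration (not part of the commit):

names = ['Log_2020-04-03_11-12-07', 'Log_2020-04-22_11-52-58', 'Log_2020-05-22_11-52-58']
start, end = 'Log_2020-04-11_21-34-15', 'Log_2020-05-22_11-52-58'
print([n for n in sorted(names) if start <= n <= end])
# ['Log_2020-04-22_11-52-58', 'Log_2020-05-22_11-52-58']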
def experiment_name_2():
    """
    In this function you choose the results you want to plot together, to compare them as an experiment.
    Just return the list of log paths (like 'results/Log_2020-04-04_10-04-42' for example), and the associated names
    of these logs.
    Below is an example of how to automatically gather all logs between two dates, and name them.
    """

    # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
    start = 'Log_2020-04-22_11-52-58'
    end = 'Log_2020-05-22_11-52-58'

    # Name of the result path
    res_path = 'results'

    # Gather logs and sort by date
    logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])

    # Optionally add a specific log at a specific place in the log list
    logs = logs.astype('<U50')
    logs = np.insert(logs, 0, 'results/Log_2020-04-04_10-04-42')

    # Give names to the logs (for plot legends)
    logs_names = ['name_log_inserted',
                  'name_log_1',
                  'name_log_2',
                  'name_log_3']

    # safe check log names
    logs_names = np.array(logs_names[:len(logs)])

    return logs, logs_names


# ----------------------------------------------------------------------------------------------------------------------
#
#           Main Call
#       \***************/
#

if __name__ == '__main__':
@ -1561,19 +763,12 @@ if __name__ == '__main__':

    # Choose a list of log to plot together for comparison
    ######################################################

    # TODO: test deformable on S3DIS to see if fitting loss works
    # TODO: try class weights on S3DIS (very low weight for beam)

    # Old result limit
    old_res_lim = 'Log_2020-03-25_19-30-17'

    # My logs: choose the logs to show
    logs, logs_names = S3DIS_deform(old_res_lim)
    #os.environ['QT_DEBUG_PLUGINS'] = '1'
    logs, logs_names = experiment_name_1()

    ######################################################
    # Choose a list of log to plot together for comparison
    ######################################################

    ################
    # Plot functions
    ################

    # Check that all logs are of the same dataset. Different object can be compared
    plot_dataset = None

@ -97,8 +97,8 @@ if __name__ == '__main__':

    chosen_log = 'results/Log_2020-04-05_19-19-20'  # => ModelNet40

    # You can also choose the index of the snapshot to load (last by default)
    chkp_idx = -1
    # Choose the index of the checkpoint to load OR None if you want to load the current checkpoint
    chkp_idx = None

    # Choose to test on validation or test split
    on_val = True

@ -111,7 +111,7 @@ if __name__ == '__main__':

    ############################

    # Set which gpu is going to be used
    GPU_ID = '1'
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

@ -215,12 +215,4 @@ if __name__ == '__main__':

    elif config.dataset_task == 'slam_segmentation':
        tester.slam_segmentation_test(net, test_loader, config)
    else:
        raise ValueError('Unsupported dataset_task for testing: ' + config.dataset_task)

    # TODO: For test and also for training. When changing epoch do not restart the worker initiation. Keep workers
    #       active with a while loop instead of using for loops.
    #       For training and validation, keep two sets of workers active in parallel? is it possible?

    # TODO: We have to verify if training on smaller spheres and testing on whole frame changes the score because
    #       batchnorm may not have the same result as distribution of points will be different.

@ -70,14 +70,19 @@ class Modelnet40Config(Config):

    # Define layers
    architecture = ['simple',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb_deformable',
                    'resnetb_deformable_strided',
                    'resnetb_deformable',
                    'global_average']

    architecture = ['simple',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'global_average']

    ###################

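A reading aid for the architecture lists in this and the following configs: each entry names one block, and (assuming the convention used elsewhere in this repository) a block whose name contains 'pool' or 'strided' closes the current layer. Under that assumption, the new rigid ModelNet40 architecture above still has five layers, only with more 'resnetb' blocks per layer:

architecture = ['simple', 'resnetb', 'resnetb_strided', 'resnetb', 'resnetb',
                'resnetb_strided', 'resnetb', 'resnetb', 'resnetb_strided',
                'resnetb', 'resnetb', 'resnetb_strided', 'resnetb', 'resnetb',
                'global_average']
num_layers = len([b for b in architecture if 'pool' in b or 'strided' in b]) + 1
print(num_layers)  # 5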
@ -97,7 +102,7 @@ class Modelnet40Config(Config):

    deform_radius = 6.0

    # Radius of the area of influence of each kernel point in "number grid cell". (1.0 is the standard value)
    KP_extent = 1.5
    KP_extent = 1.2

    # Behavior of convolutions in ('constant', 'linear', 'gaussian')
    KP_influence = 'linear'

@ -115,11 +120,13 @@ class Modelnet40Config(Config):

    use_batch_norm = True
    batch_norm_momentum = 0.05

    # Offset loss
    # 'permissive' only constrains offsets inside the deform radius
    # 'fitting' helps deformed kernels to adapt to the geometry by penalizing distance to input points
    offsets_loss = 'fitting'
    offsets_decay = 0.1

    # Deformable offset loss
    # 'point2point' fitting geometry by penalizing distance from deform point to input points
    # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
    deform_fitting_mode = 'point2point'
    deform_fitting_power = 0.1      # Multiplier for the fitting/repulsive loss
    deform_loss_power = 0.1         # Multiplier for output loss applied to the deformations
    repulse_extent = 0.8            # Distance of repulsion for deformed kernel points

    #####################
    # Training parameters

@ -131,11 +138,11 @@ class Modelnet40Config(Config):

    # Learning rate management
    learning_rate = 1e-2
    momentum = 0.98
    lr_decays = {i: 0.1**(1/80) for i in range(1, max_epoch)}
    lr_decays = {i: 0.1**(1/100) for i in range(1, max_epoch)}
    grad_clip_norm = 100.0

    # Number of batch
    batch_num = 16
    batch_num = 10

    # Number of steps per epochs
    epoch_steps = 300

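For reference, the lr_decays change above only stretches the schedule: multiplying the learning rate by 0.1**(1/100) every epoch decays it by a factor of 10 every 100 epochs instead of every 80. A quick check (not part of the commit):

lr = 1e-2
for epoch in range(100):
    lr *= 0.1 ** (1 / 100)
print(lr)  # ~1e-3, i.e. one decade after 100 epochs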
@ -179,7 +186,7 @@ if __name__ == '__main__':

    ############################

    # Set which gpu is going to be used
    GPU_ID = '3'
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

@ -59,7 +59,7 @@ class S3DISConfig(Config):

    dataset_task = ''

    # Number of CPU threads for the input pipeline
    input_threads = 20
    input_threads = 10

    #########################
    # Architecture definition

@ -72,14 +72,14 @@ class S3DISConfig(Config):

                    'resnetb',
                    'resnetb',
                    'resnetb_strided',

                    'resnetb',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb',

                    'resnetb_deformable',
                    'resnetb_deformable',
                    'resnetb_deformable_strided',
                    'resnetb_deformable',
                    'resnetb_deformable',
                    'resnetb_deformable_strided',
                    'resnetb_deformable',
                    'resnetb_deformable',

                    'nearest_upsample',
                    'unary',
                    'nearest_upsample',

@ -109,7 +109,7 @@ class S3DISConfig(Config):

    deform_radius = 6.0

    # Radius of the area of influence of each kernel point in "number grid cell". (1.0 is the standard value)
    KP_extent = 1.5
    KP_extent = 1.2

    # Behavior of convolutions in ('constant', 'linear', 'gaussian')
    KP_influence = 'linear'

@ -128,12 +128,13 @@ class S3DISConfig(Config):

    use_batch_norm = True
    batch_norm_momentum = 0.02

    # Offset loss
    # 'point2point' fitting geometry by penalizing distance from deform point to input points
    # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet
    deform_fitting_mode = 'point2point'
    deform_fitting_power = 0.05
    deform_loss_power = 0.5

    # Deformable offset loss
    # 'point2point' fitting geometry by penalizing distance from deform point to input points
    # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
    deform_fitting_mode = 'point2point'
    deform_fitting_power = 0.1      # Multiplier for the fitting/repulsive loss
    deform_loss_power = 0.1         # Multiplier for output loss applied to the deformations
    repulse_extent = 0.8            # Distance of repulsion for deformed kernel points

    #####################
    # Training parameters

@ -193,7 +194,7 @@ if __name__ == '__main__':

    ############################

    # Set which gpu is going to be used
    GPU_ID = '2'
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

@ -123,7 +123,7 @@ class SemanticKittiConfig(Config):

    deform_radius = 6.0

    # Radius of the area of influence of each kernel point in "number grid cell". (1.0 is the standard value)
    KP_extent = 1.5
    KP_extent = 1.2

    # Behavior of convolutions in ('constant', 'linear', 'gaussian')
    KP_influence = 'linear'

@ -142,11 +142,13 @@ class SemanticKittiConfig(Config):

    use_batch_norm = True
    batch_norm_momentum = 0.02

    # Offset loss
    # 'permissive' only constrains offsets inside the deform radius (NOT implemented yet)
    # 'fitting' helps deformed kernels to adapt to the geometry by penalizing distance to input points
    offsets_loss = 'fitting'
    offsets_decay = 0.01

    # Deformable offset loss
    # 'point2point' fitting geometry by penalizing distance from deform point to input points
    # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
    deform_fitting_mode = 'point2point'
    deform_fitting_power = 0.1      # Multiplier for the fitting/repulsive loss
    deform_loss_power = 0.1         # Multiplier for output loss applied to the deformations
    repulse_extent = 0.8            # Distance of repulsion for deformed kernel points

    #####################
    # Training parameters

@ -193,7 +195,6 @@ class SemanticKittiConfig(Config):

    # class_w = [1.430, 5.000, 5.000, 4.226, 5.000, 5.000, 5.000, 5.000, 0.719, 2.377,
    #            0.886, 3.863, 0.869, 1.209, 0.594, 3.780, 1.129, 5.000, 5.000]

    # Do we need to save convergence
    saving = True
    saving_path = None

@ -212,7 +213,7 @@ if __name__ == '__main__':

    ############################

    # Set which gpu is going to be used
    GPU_ID = '2'
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

@ -159,10 +159,13 @@ class Config:

    # Choose weights for class (used in segmentation loss). Empty list for no weights
    class_w = []

    # New offset regularization parameters
    deform_fitting_mode = 'point2point'
    deform_fitting_power = 0.05
    deform_loss_power = 0.5

    # Deformable offset loss
    # 'point2point' fitting geometry by penalizing distance from deform point to input points
    # 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
    deform_fitting_mode = 'point2point'
    deform_fitting_power = 0.1      # Multiplier for the fitting/repulsive loss
    deform_loss_power = 0.1         # Multiplier for output loss applied to the deformations
    repulse_extent = 1.0            # Distance of repulsion for deformed kernel points

    # Number of batch
    batch_num = 10

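The three parameters above replace the old offsets_loss / offsets_decay pair. The actual regularizer lives elsewhere in the repository and is not shown in this diff; the sketch below only illustrates, under that assumption, how a 'point2point' fitting term and a repulsion term between deformed kernel points could be combined and scaled by deform_fitting_power (tensor names and shapes are hypothetical). The deform_loss_power factor, per its comment, would instead scale how strongly the output loss itself acts on the learned offsets (the 'hook' mentioned in the old S3DIS_deform notes).

import torch

def deform_regularizer(deformed_KP, sq_dist_to_input, fitting_power=0.1, repulse_extent=1.0, KP_extent=1.2):
    # deformed_KP:      (B, K, 3) deformed kernel point positions (hypothetical input)
    # sq_dist_to_input: (B, K, N) squared distances from each kernel point to the input neighbors
    # Fitting: pull every deformed kernel point towards its closest input point
    fitting = torch.mean(sq_dist_to_input.min(dim=-1).values / (KP_extent ** 2))

    # Repulsion: push deformed kernel points apart when they come closer than repulse_extent
    dists = torch.cdist(deformed_KP, deformed_KP)                           # (B, K, K)
    K = deformed_KP.shape[1]
    off_diag = ~torch.eye(K, dtype=torch.bool, device=deformed_KP.device)   # ignore self-distances
    repulsion = torch.mean(torch.clamp(repulse_extent - dists, min=0.0)[:, off_diag] ** 2)

    return fitting_power * (fitting + repulsion)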
@ -293,7 +296,7 @@ class Config:

            text_file.write('num_classes = {:d}\n'.format(self.num_classes))
            text_file.write('in_points_dim = {:d}\n'.format(self.in_points_dim))
            text_file.write('in_features_dim = {:d}\n'.format(self.in_features_dim))
            text_file.write('in_radius = {:.3f}\n'.format(self.in_radius))
            text_file.write('in_radius = {:.6f}\n'.format(self.in_radius))
            text_file.write('input_threads = {:d}\n\n'.format(self.input_threads))

            # Model parameters

@ -309,26 +312,26 @@ class Config:

            text_file.write('num_layers = {:d}\n'.format(self.num_layers))
            text_file.write('first_features_dim = {:d}\n'.format(self.first_features_dim))
            text_file.write('use_batch_norm = {:d}\n'.format(int(self.use_batch_norm)))
            text_file.write('batch_norm_momentum = {:.3f}\n\n'.format(self.batch_norm_momentum))
            text_file.write('batch_norm_momentum = {:.6f}\n\n'.format(self.batch_norm_momentum))
            text_file.write('segmentation_ratio = {:.3f}\n\n'.format(self.segmentation_ratio))
            text_file.write('segmentation_ratio = {:.6f}\n\n'.format(self.segmentation_ratio))

            # KPConv parameters
            text_file.write('# KPConv parameters\n')
            text_file.write('# *****************\n\n')

            text_file.write('first_subsampling_dl = {:.3f}\n'.format(self.first_subsampling_dl))
            text_file.write('first_subsampling_dl = {:.6f}\n'.format(self.first_subsampling_dl))
            text_file.write('num_kernel_points = {:d}\n'.format(self.num_kernel_points))
            text_file.write('conv_radius = {:.3f}\n'.format(self.conv_radius))
            text_file.write('conv_radius = {:.6f}\n'.format(self.conv_radius))
            text_file.write('deform_radius = {:.3f}\n'.format(self.deform_radius))
            text_file.write('deform_radius = {:.6f}\n'.format(self.deform_radius))
            text_file.write('fixed_kernel_points = {:s}\n'.format(self.fixed_kernel_points))
            text_file.write('KP_extent = {:.3f}\n'.format(self.KP_extent))
            text_file.write('KP_extent = {:.6f}\n'.format(self.KP_extent))
            text_file.write('KP_influence = {:s}\n'.format(self.KP_influence))
            text_file.write('aggregation_mode = {:s}\n'.format(self.aggregation_mode))
            text_file.write('modulated = {:d}\n'.format(int(self.modulated)))
            text_file.write('n_frames = {:d}\n'.format(self.n_frames))
            text_file.write('max_in_points = {:d}\n\n'.format(self.max_in_points))
            text_file.write('max_val_points = {:d}\n\n'.format(self.max_val_points))
            text_file.write('val_radius = {:.3f}\n\n'.format(self.val_radius))
            text_file.write('val_radius = {:.6f}\n\n'.format(self.val_radius))

            # Training parameters
            text_file.write('# Training parameters\n')

@ -350,22 +353,23 @@ class Config:

            text_file.write('augment_rotation = {:s}\n'.format(self.augment_rotation))
            text_file.write('augment_noise = {:f}\n'.format(self.augment_noise))
            text_file.write('augment_occlusion = {:s}\n'.format(self.augment_occlusion))
            text_file.write('augment_occlusion_ratio = {:.3f}\n'.format(self.augment_occlusion_ratio))
            text_file.write('augment_occlusion_ratio = {:.6f}\n'.format(self.augment_occlusion_ratio))
            text_file.write('augment_occlusion_num = {:d}\n'.format(self.augment_occlusion_num))
            text_file.write('augment_scale_anisotropic = {:d}\n'.format(int(self.augment_scale_anisotropic)))
            text_file.write('augment_scale_min = {:.3f}\n'.format(self.augment_scale_min))
            text_file.write('augment_scale_min = {:.6f}\n'.format(self.augment_scale_min))
            text_file.write('augment_scale_max = {:.3f}\n'.format(self.augment_scale_max))
            text_file.write('augment_scale_max = {:.6f}\n'.format(self.augment_scale_max))
            text_file.write('augment_color = {:.3f}\n\n'.format(self.augment_color))
            text_file.write('augment_color = {:.6f}\n\n'.format(self.augment_color))

            text_file.write('weight_decay = {:f}\n'.format(self.weight_decay))
            text_file.write('segloss_balance = {:s}\n'.format(self.segloss_balance))
            text_file.write('class_w =')
            for a in self.class_w:
                text_file.write(' {:.3f}'.format(a))
                text_file.write(' {:.6f}'.format(a))
            text_file.write('\n')
            text_file.write('deform_fitting_mode = {:s}\n'.format(self.deform_fitting_mode))
            text_file.write('deform_fitting_power = {:f}\n'.format(self.deform_fitting_power))
            text_file.write('deform_fitting_power = {:.6f}\n'.format(self.deform_fitting_power))
            text_file.write('deform_loss_power = {:f}\n'.format(self.deform_loss_power))
            text_file.write('deform_loss_power = {:.6f}\n'.format(self.deform_loss_power))
            text_file.write('repulse_extent = {:.6f}\n'.format(self.repulse_extent))
            text_file.write('batch_num = {:d}\n'.format(self.batch_num))
            text_file.write('val_batch_num = {:d}\n'.format(self.val_batch_num))
            text_file.write('max_epoch = {:d}\n'.format(self.max_epoch))

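With the switch from {:f} and {:.3f} to {:.6f} above, every numeric line in the saved parameter file keeps six decimals. A small illustration of the resulting output (not part of the commit):

print('deform_fitting_power = {:.6f}'.format(0.1))   # deform_fitting_power = 0.100000
print('KP_extent = {:.6f}'.format(1.2))              # KP_extent = 1.200000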
202 utils/trainer.py
@ -894,208 +894,6 @@ class ModelTrainer:

    # Saving methods
    # ------------------------------------------------------------------------------------------------------------------

    def save_kernel_points(self, model, epoch):
        """
        Method saving kernel point disposition and current model weights for later visualization
        """

        if model.config.saving:

            # Create a directory to save kernels of this epoch
            kernels_dir = join(model.saving_path, 'kernel_points', 'epoch{:d}'.format(epoch))
            if not exists(kernels_dir):
                makedirs(kernels_dir)

            # Get points
            all_kernel_points_tf = [v for v in tf.global_variables() if 'kernel_points' in v.name
                                    and v.name.startswith('KernelPoint')]
            all_kernel_points = self.sess.run(all_kernel_points_tf)

            # Get Extents
            if False and 'gaussian' in model.config.convolution_mode:
                all_kernel_params_tf = [v for v in tf.global_variables() if 'kernel_extents' in v.name
                                        and v.name.startswith('KernelPoint')]
                all_kernel_params = self.sess.run(all_kernel_params_tf)
            else:
                all_kernel_params = [None for p in all_kernel_points]

            # Save in ply file
            for kernel_points, kernel_extents, v in zip(all_kernel_points, all_kernel_params, all_kernel_points_tf):

                # Name of saving file
                ply_name = '_'.join(v.name[:-2].split('/')[1:-1]) + '.ply'
                ply_file = join(kernels_dir, ply_name)

                # Data to save
                if kernel_points.ndim > 2:
                    kernel_points = kernel_points[:, 0, :]
                if False and 'gaussian' in model.config.convolution_mode:
                    data = [kernel_points, kernel_extents]
                    keys = ['x', 'y', 'z', 'sigma']
                else:
                    data = kernel_points
                    keys = ['x', 'y', 'z']

                # Save
                write_ply(ply_file, data, keys)

            # Get Weights
            all_kernel_weights_tf = [v for v in tf.global_variables() if 'weights' in v.name
                                     and v.name.startswith('KernelPointNetwork')]
            all_kernel_weights = self.sess.run(all_kernel_weights_tf)

            # Save in numpy file
            for kernel_weights, v in zip(all_kernel_weights, all_kernel_weights_tf):
                np_name = '_'.join(v.name[:-2].split('/')[1:-1]) + '.npy'
                np_file = join(kernels_dir, np_name)
                np.save(np_file, kernel_weights)

    # Debug methods
    # ------------------------------------------------------------------------------------------------------------------

    def show_memory_usage(self, batch_to_feed):

        for l in range(self.config.num_layers):
            neighb_size = list(batch_to_feed[self.in_neighbors_f32[l]].shape)
            dist_size = neighb_size + [self.config.num_kernel_points, 3]
            dist_memory = np.prod(dist_size) * 4 * 1e-9
            in_feature_size = neighb_size + [self.config.first_features_dim * 2**l]
            in_feature_memory = np.prod(in_feature_size) * 4 * 1e-9
            out_feature_size = [neighb_size[0], self.config.num_kernel_points, self.config.first_features_dim * 2**(l+1)]
            out_feature_memory = np.prod(out_feature_size) * 4 * 1e-9

            print('Layer {:d} => {:.1f}GB {:.1f}GB {:.1f}GB'.format(l,
                                                                    dist_memory,
                                                                    in_feature_memory,
                                                                    out_feature_memory))
        print('************************************')

    def debug_nan(self, model, inputs, logits):
        """
        NaN happened, find where
        """

        print('\n\n------------------------ NaN DEBUG ------------------------\n')

        # First save everything to reproduce error
        file1 = join(model.saving_path, 'all_debug_inputs.pkl')
        with open(file1, 'wb') as f1:
            pickle.dump(inputs, f1)

        # First save all inputs
        file1 = join(model.saving_path, 'all_debug_logits.pkl')
        with open(file1, 'wb') as f1:
            pickle.dump(logits, f1)

        # Then print a list of the trainable variables and if they have nan
        print('List of variables :')
        print('*******************\n')
        all_vars = self.sess.run(tf.global_variables())
        for v, value in zip(tf.global_variables(), all_vars):
            nan_percentage = 100 * np.sum(np.isnan(value)) / np.prod(value.shape)
            print(v.name, ' => {:.1f}% of values are NaN'.format(nan_percentage))

        print('Inputs :')
        print('********')

        # Print inputs
        nl = model.config.num_layers
        for layer in range(nl):

            print('Layer : {:d}'.format(layer))

            points = inputs[layer]
            neighbors = inputs[nl + layer]
            pools = inputs[2*nl + layer]
            upsamples = inputs[3*nl + layer]

            nan_percentage = 100 * np.sum(np.isnan(points)) / np.prod(points.shape)
            print('Points =>', points.shape, '{:.1f}% NaN'.format(nan_percentage))
            nan_percentage = 100 * np.sum(np.isnan(neighbors)) / np.prod(neighbors.shape)
            print('neighbors =>', neighbors.shape, '{:.1f}% NaN'.format(nan_percentage))
            nan_percentage = 100 * np.sum(np.isnan(pools)) / np.prod(pools.shape)
            print('pools =>', pools.shape, '{:.1f}% NaN'.format(nan_percentage))
            nan_percentage = 100 * np.sum(np.isnan(upsamples)) / np.prod(upsamples.shape)
            print('upsamples =>', upsamples.shape, '{:.1f}% NaN'.format(nan_percentage))

        ind = 4 * nl
        features = inputs[ind]
        nan_percentage = 100 * np.sum(np.isnan(features)) / np.prod(features.shape)
        print('features =>', features.shape, '{:.1f}% NaN'.format(nan_percentage))
        ind += 1
        batch_weights = inputs[ind]
        ind += 1
        in_batches = inputs[ind]
        max_b = np.max(in_batches)
        print(in_batches.shape)
        in_b_sizes = np.sum(in_batches < max_b - 0.5, axis=-1)
        print('in_batch_sizes =>', in_b_sizes)
        ind += 1
        out_batches = inputs[ind]
        max_b = np.max(out_batches)
        print(out_batches.shape)
        out_b_sizes = np.sum(out_batches < max_b - 0.5, axis=-1)
        print('out_batch_sizes =>', out_b_sizes)
        ind += 1
        point_labels = inputs[ind]
        print('point labels, ', point_labels.shape, ', values : ', np.unique(point_labels))
        print(np.array([int(100 * np.sum(point_labels == l) / len(point_labels)) for l in np.unique(point_labels)]))

        ind += 1
        if model.config.dataset.startswith('ShapeNetPart_multi'):
            object_labels = inputs[ind]
            nan_percentage = 100 * np.sum(np.isnan(object_labels)) / np.prod(object_labels.shape)
            print('object_labels =>', object_labels.shape, '{:.1f}% NaN'.format(nan_percentage))
            ind += 1
        augment_scales = inputs[ind]
        ind += 1
        augment_rotations = inputs[ind]
        ind += 1

        print('\npoolings and upsamples nums :\n')

        # Print inputs
        for layer in range(nl):

            print('\nLayer : {:d}'.format(layer))

            neighbors = inputs[nl + layer]
            pools = inputs[2*nl + layer]
            upsamples = inputs[3*nl + layer]

            max_n = np.max(neighbors)
            nums = np.sum(neighbors < max_n - 0.5, axis=-1)
            print('min neighbors =>', np.min(nums))

            if np.prod(pools.shape) > 0:
                max_n = np.max(pools)
                nums = np.sum(pools < max_n - 0.5, axis=-1)
                print('min pools =>', np.min(nums))
            else:
                print('pools empty')

            if np.prod(upsamples.shape) > 0:
                max_n = np.max(upsamples)
                nums = np.sum(upsamples < max_n - 0.5, axis=-1)
                print('min upsamples =>', np.min(nums))
            else:
                print('upsamples empty')

        print('\nFinished\n\n')
        time.sleep(0.5)

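The methods removed above are leftovers from the TensorFlow implementation (tf.global_variables, self.sess) and have no direct counterpart in this PyTorch trainer. If a similar NaN check is ever needed again, a minimal sketch with torch (not part of this commit) could be:

import torch

def print_nan_percentages(net):
    # Print the percentage of NaN values in every parameter of a torch.nn.Module
    for name, param in net.named_parameters():
        nan_percentage = 100.0 * torch.isnan(param).sum().item() / param.numel()
        print('{:s} => {:.1f}% of values are NaN'.format(name, nan_percentage))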
1288 utils/visualizer.py
File diff suppressed because it is too large
@ -92,21 +92,14 @@ if __name__ == '__main__':

    # Here you can choose which model you want to test with the variable test_model. Here are the possible values :
    #
    #       > 'last_XXX': Automatically retrieve the last trained model on dataset XXX
    #       > '(old_)results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model
    #       > 'results/Log_YYYY-MM-DD_HH-MM-SS': Directly provide the path of a trained model

    # chosen_log = 'results/Log_2020-04-04_10-04-42'  # => ModelNet40
    # chosen_log = 'results/Log_2020-04-22_11-53-45'  # => S3DIS
    # chosen_log = 'results/Log_2020-04-22_12-28-37'  # => S3DIS corrected
    # chosen_log = 'results/Log_2020-04-23_09-48-15'  # => S3DIS no repulsive
    # chosen_log = 'results/Log_2020-04-23_09-49-49'  # => S3DIS repulsive 0.5
    # chosen_log = 'results/Log_2020-04-23_19-41-12'  # => S3DIS 10*fitting
    chosen_log = 'results/Log_2020-04-23_19-42-18'  # => S3DIS no hook
    chosen_log = 'results/Log_2020-04-23_19-42-18'

    # Choose the index of the checkpoint to load OR None if you want to load the current checkpoint
    chkp_idx = None

    # You can also choose the index of the snapshot to load (last by default)
    chkp_idx = -1

    # Eventually you can choose which feature is visualized (index of the deform operation in the network)
    # Eventually you can choose which feature is visualized (index of the deform convolution in the network)
    deform_idx = 0

    # Deal with 'last_XXX' choices

@ -148,7 +141,6 @@ if __name__ == '__main__':

    # Change parameters for the test here. For example, you can stop augmenting the input data.
    config.augment_noise = 0.0001
    #config.augment_symmetries = False
    config.batch_num = 1
    config.in_radius = 2.0
    config.input_threads = 0