Initial commit
parent e128d483ca
commit 755317d83a

@@ -1191,17 +1191,32 @@ def S3DISCollate(batch_data):
 # \*********************/


-def debug_sampling(dataset, sampler, loader):
+def debug_upsampling(dataset, loader):
     """Shows which labels are sampled according to strategy chosen"""
-    label_sum = np.zeros((dataset.num_classes), dtype=np.int32)

     for epoch in range(10):

-        for batch_i, (points, normals, labels, indices, in_sizes) in enumerate(loader):
-            # print(batch_i, tuple(points.shape), tuple(normals.shape), labels, indices, in_sizes)
+        for batch_i, batch in enumerate(loader):

-            label_sum += np.bincount(labels.numpy(), minlength=dataset.num_classes)
-            print(label_sum)
-            #print(sampler.potentials[:6])
+            pc1 = batch.points[1].numpy()
+            pc2 = batch.points[2].numpy()
+            up1 = batch.upsamples[1].numpy()
+
+            print(pc1.shape, '=>', pc2.shape)
+            print(up1.shape, np.max(up1))
+
+            pc2 = np.vstack((pc2, np.zeros_like(pc2[:1, :])))
+
+            # Get neighbors distance
+            p0 = pc1[10, :]
+            neighbs0 = up1[10, :]
+            neighbs0 = pc2[neighbs0, :] - p0
+            d2 = np.sum(neighbs0 ** 2, axis=1)
+
+            print(neighbs0.shape)
+            print(neighbs0[:5])
+            print(d2[:5])

             print('******************')
         print('*******************************************')
@@ -1210,7 +1225,7 @@ def debug_sampling(dataset, sampler, loader):
     print(counts)


-def debug_timing(dataset, sampler, loader):
+def debug_timing(dataset, loader):
     """Timing of generator function"""

     t = [time.time()]
@@ -1252,7 +1267,7 @@ def debug_timing(dataset, sampler, loader):
     print(counts)


-def debug_show_clouds(dataset, sampler, loader):
+def debug_show_clouds(dataset, loader):


     for epoch in range(10):
@@ -1307,7 +1322,7 @@ def debug_show_clouds(dataset, sampler, loader):
     print(counts)


-def debug_batch_and_neighbors_calib(dataset, sampler, loader):
+def debug_batch_and_neighbors_calib(dataset, loader):
     """Timing of generator function"""

     t = [time.time()]

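Note: the new debug_upsampling above appears to rely on the "shadow point" convention: upsample/neighbor index matrices are padded with an out-of-range index equal to the point count, so a zero row is stacked onto the coarser cloud before gathering (the np.vstack line). A minimal sketch of that gather, with hypothetical toy shapes:

import numpy as np

# 4 coarse points; index 4 is the out-of-range "shadow" padding value
pc2 = np.random.rand(4, 3)
up1 = np.array([[0, 2, 4],
                [1, 4, 4]])

pc2 = np.vstack((pc2, np.zeros_like(pc2[:1, :])))  # row 4 is now a zero shadow point
neighbors = pc2[up1, :]                            # padded slots safely gather the zero row
print(neighbors.shape)                             # (2, 3, 3)
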
@@ -469,7 +469,8 @@ class PointCloudDataset(Dataset):
         # Reduce size of neighbors matrices by eliminating furthest point
         conv_i = self.big_neighborhood_filter(conv_i, len(input_points))
         pool_i = self.big_neighborhood_filter(pool_i, len(input_points))
-        up_i = self.big_neighborhood_filter(up_i, len(input_points))
+        if up_i.shape[0] > 0:
+            up_i = self.big_neighborhood_filter(up_i, len(input_points)+1)

         # Updating input lists
         input_points += [stacked_points]

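Note: the change above skips the call when up_i is empty and filters upsample indices with the limit of the following layer (len(input_points)+1), which suggests up_i indexes into the coarser cloud. big_neighborhood_filter itself is not shown in this diff; a minimal sketch of what such a filter typically does (assuming a per-layer neighborhood_limits list computed during calibration; names hypothetical):

def big_neighborhood_filter(self, neighbors, layer):
    # Crop each neighborhood matrix to the calibrated max neighbor count
    # for that layer; leave it untouched before calibration has run.
    if len(self.neighborhood_limits) > 0:
        return neighbors[:, :self.neighborhood_limits[layer]]
    return neighbors
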
@@ -53,6 +53,7 @@ class KPCNN(nn.Module):
                 break

             # Apply the good block function defining tf ops
+            print(block, r)
             self.block_ops.append(block_decider(block,
                                                 r,
                                                 in_dim,
@@ -60,6 +61,7 @@ class KPCNN(nn.Module):
                                                 layer,
                                                 config))

+
             # Index of block in this layer
             block_in_layer += 1

@@ -329,6 +331,7 @@ class KPFCNN(nn.Module):
             if block_i in self.encoder_skips:
                 skip_x.append(x)
             x = block_op(x, batch)
+            print(block_op)

         for block_i, block_op in enumerate(self.decoder_blocs):
             if block_i in self.decoder_concats:

@@ -355,6 +355,8 @@ class KPConv(nn.Module):
         # Convolution sum [n_points, out_fdim]
         return torch.sum(kernel_outputs, dim=0)

+    def __repr__(self):
+        return "KPConv(radius: %.2f, in_feat: %i, out_feat: %i)" % (self.radius, self.in_channels, self.out_channels)

 # ----------------------------------------------------------------------------------------------------------------------
 #

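Note: with __repr__ defined, printing a model (see the print(net) call added in the training script later in this diff) reports each KPConv layer's geometry instead of the default module repr. A tiny illustration of the format string, with hypothetical values:

# A KPConv built with radius=0.05, in_channels=64, out_channels=128 prints as:
print("KPConv(radius: %.2f, in_feat: %i, out_feat: %i)" % (0.05, 64, 128))
# -> KPConv(radius: 0.05, in_feat: 64, out_feat: 128)
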
@@ -1374,7 +1374,9 @@ def ModelNet40_deform(old_result_limit):

 def S3DIS_first(old_result_limit):
     """
-    Test first S3DIS
+    Test first S3DIS. The first two tests have all symmetries (even vertical ones), which is not good. We correct this in
+    the following runs.
+    Then we try some experiments with different input scales and the results are not as high as expected. WHY?
     """

     # Using the dates of the logs, you can easily gather consecutive ones. All logs should be of the same dataset.
@@ -1395,6 +1397,8 @@ def S3DIS_first(old_result_limit):
                  'Fin=5_R=1.2_r=0.02',
                  'Fin=5_R=1.8_r=0.03',
                  'Fin=5_R=2.5_r=0.04',
+                 'original_normal',
+                 'original_deform',
                  'test']

     logs_names = np.array(logs_names[:len(logs)])

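Note: the comment in S3DIS_first about gathering logs by date refers to chronologically named result folders. A minimal sketch of such gathering (folder naming and start date hypothetical):

from os import listdir
from os.path import join
import numpy as np

res_path = 'results'  # assumed results directory
logs = np.sort([join(res_path, f) for f in listdir(res_path)
                if f > 'Log_2020-04-03'])  # keep consecutive runs from this date onward
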
@@ -62,7 +62,7 @@ class S3DISConfig(Config):
     dataset_task = ''

     # Number of CPU threads for the input pipeline
-    input_threads = 10
+    input_threads = 20

     #########################
     # Architecture definition
@@ -70,14 +70,15 @@ class S3DISConfig(Config):

     # Define layers
     architecture = ['simple',
                     'resnetb_strided',
                     'resnetb',
                     'resnetb_strided',
                     'resnetb',
                     'resnetb_strided',
                     'resnetb',
-                    'resnetb_strided',
-                    'resnetb',
+                    'resnetb_deformable',
+                    'resnetb_deformable_strided',
+                    'resnetb_deformable',
                     'nearest_upsample',
                     'unary',
                     'nearest_upsample',
@@ -92,7 +93,7 @@ class S3DISConfig(Config):
     ###################

     # Radius of the input sphere
-    in_radius = 2.5
+    in_radius = 2.0

     # Number of kernel points
     num_kernel_points = 15
@@ -107,7 +108,7 @@ class S3DISConfig(Config):
     deform_radius = 6.0

     # Radius of the area of influence of each kernel point in "number grid cell". (1.0 is the standard value)
-    KP_extent = 1.5
+    KP_extent = 1.2

     # Behavior of convolutions in ('constant', 'linear', 'gaussian')
     KP_influence = 'linear'
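Note: KP_extent sets the influence radius of each kernel point, and KP_influence selects how a neighbor's weight decays with its distance to a kernel point. A minimal sketch of the three modes named in the comment above (not the repo's exact code; the 0.3 Gaussian width factor is an assumption):

import numpy as np

def kp_influence(sq_distances, KP_extent, mode='linear'):
    if mode == 'constant':
        return np.ones_like(sq_distances)                       # all neighbors weigh the same
    if mode == 'linear':
        # weight falls linearly from 1 at the kernel point to 0 at KP_extent
        return np.clip(1.0 - np.sqrt(sq_distances) / KP_extent, 0.0, None)
    if mode == 'gaussian':
        sigma = KP_extent * 0.3                                 # assumed width
        return np.exp(-sq_distances / (2 * sigma ** 2))
    raise ValueError('Unknown influence mode: ' + mode)
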
@@ -119,11 +120,11 @@ class S3DISConfig(Config):
     in_features_dim = 5

     # Can the network learn modulations
-    modulated = True
+    modulated = False

     # Batch normalization parameters
     use_batch_norm = True
-    batch_norm_momentum = 0.05
+    batch_norm_momentum = 0.02

     # Offset loss
     # 'permissive' only constrains offsets inside the deform radius (NOT implemented yet)
@@ -145,7 +146,7 @@ class S3DISConfig(Config):
     grad_clip_norm = 100.0

     # Number of batch
-    batch_num = 8
+    batch_num = 10

     # Number of steps per epochs
     epoch_steps = 500
@@ -163,7 +164,7 @@ class S3DISConfig(Config):
     augment_scale_min = 0.9
     augment_scale_max = 1.1
     augment_noise = 0.001
-    augment_color = 0.9
+    augment_color = 0.8

     # The way we balance segmentation loss TODO: implement and test 'class' and 'batch' modes
     # > 'none': Each point in the whole batch has the same contribution.
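Note: the 'none' balancing mode described above is simply an unweighted mean over every point in the batch; the TODO 'class' and 'batch' modes would reweight that mean. A minimal sketch (logits and labels are hypothetical per-point tensors; 13 is the S3DIS class count):

import torch
import torch.nn.functional as F

outputs = torch.randn(100, 13)           # hypothetical per-point logits
labels = torch.randint(0, 13, (100,))    # hypothetical per-point targets
loss = F.cross_entropy(outputs, labels)  # 'none' mode: plain mean over all points
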
@@ -188,6 +189,11 @@ if __name__ == '__main__':
     # Initialize the environment
     ############################

+    # TODO: 9 million parameters instead of 14 million... Why?
+    # TODO: radius of the strided blocks is twice too large
+    # TODO: implement a simpler sampler
+    # TODO: test batch size of 16
+
     # Set which gpu is going to be used
     GPU_ID = '1'

@@ -264,9 +270,9 @@ if __name__ == '__main__':
     training_sampler.calibration(training_loader, verbose=True)
     test_sampler.calibration(test_loader, verbose=True)

-    #debug_timing(training_dataset, training_sampler, training_loader)
-    #debug_timing(test_dataset, test_sampler, test_loader)
-    #debug_show_clouds(training_dataset, training_sampler, training_loader)
+    #debug_timing(training_dataset, training_loader)
+    #debug_timing(test_dataset, test_loader)
+    #debug_upsampling(training_dataset, training_loader)

     print('\nModel Preparation')
     print('*****************')
@@ -274,6 +280,8 @@ if __name__ == '__main__':
     # Define network model
     t1 = time.time()
     net = KPFCNN(config)
+    print(net)
+    print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad))

     # Define a trainer class
     trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)

@@ -144,7 +144,7 @@ class Config:
     augment_color = 0.7

     # Augment with occlusions (not implemented yet)
-    augment_occlusion = 'planar'
+    augment_occlusion = 'none'
     augment_occlusion_ratio = 0.2
     augment_occlusion_num = 1

@@ -233,7 +233,7 @@ class Config:
         # Class variable dictionary
         for line in lines:
             line_info = line.split()
-            if len(line_info) > 1 and line_info[0] != '#':
+            if len(line_info) > 2 and line_info[0] != '#':

                 if line_info[2] == 'None':
                     setattr(self, line_info[0], None)

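Note: the stricter check above (> 2 instead of > 1) matches a saved-parameter format of three whitespace-separated tokens, name = value, where line_info[2] holds the value; shorter lines and '#' comments are skipped. A minimal illustration (the config line itself is hypothetical):

line = 'in_radius = 2.0'
line_info = line.split()               # ['in_radius', '=', '2.0']
if len(line_info) > 2 and line_info[0] != '#':
    print(line_info[0], line_info[2])  # in_radius 2.0
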
@@ -262,7 +262,6 @@ class ModelTrainer:
             checkpoint_path = join(checkpoint_directory, 'chkp_{:04d}.tar'.format(self.epoch))
             torch.save(save_dict, checkpoint_path)

-
         # Validation
         net.eval()
         self.validation(net, val_loader, config)