Initial commit
parent 3d05a41368
commit 4b4c7b9f1c
@ -504,7 +504,8 @@ class SemanticKittiDataset(PointCloudDataset):
# > Drop: We can drop even more points. Random choice could be faster without replace=False (see the sketch after this hunk)
# > reproj: No reprojection needed
# > Augment: See which data augmentations we want at test time
# > input: MAIN BOTTLENECK. We need to see if we can do this faster, maybe with some parallelisation
# > input: MAIN BOTTLENECK. We need to see if we can do this faster, maybe with some parallelisation. neighbors
# and subsampling accelerated with lidar frame order

return [self.config.num_layers] + input_list
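A minimal sketch of the faster drop suggested in the first note above; `drop_points`, `points`, and `max_in` are illustrative names, not identifiers from this commit:

import numpy as np

def drop_points(points, max_in):
    # np.random.choice with replace=False builds a full permutation internally,
    # so sampling with replacement (or slicing a permutation) is usually faster
    n = points.shape[0]
    if n <= max_in:
        return points
    idx = np.random.choice(n, size=max_in, replace=True)   # fast, may duplicate points
    # idx = np.random.permutation(n)[:max_in]              # exact subset, no duplicates
    return points[idx]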
@ -887,9 +888,6 @@ class SemanticKittiSampler(Sampler):
if breaking:
    break

# TODO: Compute the percentile np.percentile?
# TODO: optionally show a plot of the in_points histogram?

self.dataset.max_in_p = int(np.percentile(all_lengths, 100*untouched_ratio))
if verbose:
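As a standalone illustration of the calibration above, with fake data standing in for the sampler's collected lengths (`all_lengths` and `untouched_ratio` mirror the hunk's variables):

import numpy as np

all_lengths = np.random.randint(5000, 60000, size=1000)   # fake per-sphere point counts
untouched_ratio = 0.9                                      # leave 90% of spheres uncropped

# cap max_in_p so that a fraction untouched_ratio of spheres fits without cropping
max_in_p = int(np.percentile(all_lengths, 100 * untouched_ratio))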
@ -1379,7 +1377,7 @@ def debug_class_w(dataset, loader):
i = 0

counts = np.zeros((0, dataset.num_classes,), dtype=np.int64)
counts = np.zeros((dataset.num_classes,), dtype=np.int64)

s = '{:^6}|'.format('step')
for c in dataset.label_names:
@ -1393,12 +1391,12 @@ def debug_class_w(dataset, loader):
# count labels
new_counts = np.bincount(batch.labels)

counts[:new_counts.shape[0]] += new_counts.astype(np.int64)

# Update proportions
proportions = 1000 * counts / np.sum(counts)

print(proportions)
s = '{:^6d}|'.format(i)
for pp in proportions:
    s += '{:^6.1f}'.format(pp)
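A self-contained sketch of what this debug loop accumulates, with random labels standing in for `batch.labels`:

import numpy as np

num_classes = 20
counts = np.zeros((num_classes,), dtype=np.int64)

for step in range(100):
    labels = np.random.randint(0, num_classes, size=4096)   # stand-in for batch.labels
    new_counts = np.bincount(labels)
    # bincount stops at the largest class id seen, hence the partial slice
    counts[:new_counts.shape[0]] += new_counts.astype(np.int64)
    proportions = 1000 * counts / np.sum(counts)             # per-mille, as printed above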
@ -117,8 +117,6 @@ class KPCNN(nn.Module):
:return: loss
"""

# TODO: Ignore unclassified points in loss for segmentation architecture

# Cross entropy loss
self.output_loss = self.criterion(outputs, labels)
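One standard way to realise the TODO above is PyTorch's built-in ignore_index; this is an assumption about intent, not code from the commit (which class id marks unclassified points is also assumed):

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss(ignore_index=0)   # assume label 0 = unclassified

outputs = torch.randn(8, 20)                      # fake logits: 8 points, 20 classes
labels = torch.randint(0, 20, (8,))               # fake labels
loss = criterion(outputs, labels)                 # points labeled 0 contribute nothing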
@ -1464,6 +1464,8 @@ def SemanticKittiFirst(old_result_limit):
logs_names = ['R=5.0_dl=0.04',
              'R=5.0_dl=0.08',
              'R=10.0_dl=0.08',
              'R=10.0_dl=0.08_weigths',
              'R=10.0_dl=0.08_sqrt_weigths',
              'test']

logs_names = np.array(logs_names[:len(logs)])
@ -1481,7 +1483,6 @@ if __name__ == '__main__':
######################################################

# TODO: test deformable on S3DIS to see if the fitting loss works
# TODO: Get going on SemanticKitti for Wednesday; at least have a timing to give to them
# TODO: try class weights on S3DIS (very low weight for beam)

# Old result limit
@ -178,7 +178,7 @@ class S3DISConfig(Config):
augment_noise = 0.001
augment_color = 0.8

# The way we balance segmentation loss TODO: implement and test 'class' and 'batch' modes
# The way we balance segmentation loss
# > 'none': Each point in the whole batch has the same contribution.
# > 'class': Each class has the same contribution (points are weighted according to class balance)
# > 'batch': Each cloud in the batch has the same contribution (points are weighted according to cloud sizes)
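Since the removed TODO says 'class' and 'batch' are not yet implemented, here is one hedged reading of what the three modes could compute given per-point cross-entropy values; `losses`, `labels`, and `cloud_ids` are illustrative names, not the repository's API:

import torch

def balance_loss(losses, labels, cloud_ids, mode, num_classes):
    if mode == 'none':
        return losses.mean()                      # every point weighs the same
    if mode == 'class':
        per_class = [losses[labels == c].mean()
                     for c in range(num_classes) if (labels == c).any()]
        return torch.stack(per_class).mean()      # every class weighs the same
    if mode == 'batch':
        per_cloud = [losses[cloud_ids == i].mean()
                     for i in torch.unique(cloud_ids)]
        return torch.stack(per_cloud).mean()      # every cloud weighs the same
    raise ValueError(mode)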
@ -180,7 +180,24 @@ class SemanticKittiConfig(Config):
augment_color = 0.8

# Choose class weights (used in the segmentation loss). Empty list for no weights
class_w = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# class proportions for R=10.0 and dl=0.08 (first is unlabeled)
# 19.1 48.9 0.5 1.1 5.6 3.6 0.7 0.6 0.9 193.2 17.7 127.4 6.7 132.3 68.4 283.8 7.0 78.5 3.3 0.8
#
#

# Inverse of proportion * 20
# class_w = [0.409, 40.000, 18.182, 3.571, 5.556, 28.571, 33.333, 22.222, 0.104,
#            1.130, 0.157, 2.985, 0.151, 0.292, 0.070, 2.857, 0.255, 6.061, 25.000]

# Inverse of proportion * 20, capped (0.1 < X < 10)
# class_w = [0.409, 10.000, 10.000, 3.571, 5.556, 10.000, 10.000, 10.000, 0.104,
#            1.130, 0.157, 2.985, 0.151, 0.292, 0.100, 2.857, 0.255, 6.061, 10.000]

# Inverse of proportion * 20, then sqrt
class_w = [0.639529479, 6.32455532, 4.264014327, 1.889822365, 2.357022604, 5.345224838,
           5.773502692, 4.714045208, 0.321744726, 1.062988007, 0.396214426, 1.727736851,
           0.388807896, 0.54073807, 0.265465937, 1.690308509, 0.504754465, 2.46182982, 5]
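The three weight variants above can be reproduced from the listed proportions; this reconstruction follows from the numbers themselves and is not code from the commit:

import numpy as np

# proportions from the comment above, first entry is 'unlabeled'
props = np.array([19.1, 48.9, 0.5, 1.1, 5.6, 3.6, 0.7, 0.6, 0.9, 193.2,
                  17.7, 127.4, 6.7, 132.3, 68.4, 283.8, 7.0, 78.5, 3.3, 0.8])

inv = 20.0 / props[1:]                 # inverse of proportion * 20 (unlabeled skipped)
capped = np.clip(inv, 0.1, 10.0)       # capped variant (0.1 < X < 10)
sqrt_w = np.sqrt(inv)                  # sqrt variant, the one left active above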
# Do we need to save convergence
saving = True
@ -283,7 +300,7 @@ if __name__ == '__main__':
# debug_timing(training_dataset, training_loader)
# debug_timing(test_dataset, test_loader)
debug_class_w(training_dataset, training_loader)
# debug_class_w(training_dataset, training_loader)

print('\nModel Preparation')
print('*****************')
@ -316,6 +333,3 @@ if __name__ == '__main__':
print('Forcing exit now')
os.kill(os.getpid(), signal.SIGINT)

# TODO: Create a function debug_class_weights that shows class distribution in input sphere. Use that as
# an indication for the class weights during training