From aee5494f323aa8db523cfdcc7246562d082add00 Mon Sep 17 00:00:00 2001
From: HuguesTHOMAS
Date: Wed, 4 Aug 2021 15:01:56 +0000
Subject: [PATCH] .

---
 doc/pretrained_models_guide.md |  5 ++-
 plot_convergence.py            | 28 +++++++++++----
 train_S3DIS.py                 | 62 +++++++++++++++++-----------------
 3 files changed, 55 insertions(+), 40 deletions(-)

diff --git a/doc/pretrained_models_guide.md b/doc/pretrained_models_guide.md
index d6652a8..148461e 100644
--- a/doc/pretrained_models_guide.md
+++ b/doc/pretrained_models_guide.md
@@ -4,15 +4,14 @@
 
 ### Models
 
-We provide pretrained weights for S3DIS dataset. The raw weights come with a parameter file describing the architecture and network hyperparameters. THe code can thus load the network automatically.
-
-The instructions to run these models are in the S3DIS documentation, section [Test the trained model](./doc/scene_segmentation_guide.md#test-the-trained-model).
+We provide pretrained weights for the S3DIS dataset. The raw weights come with a parameter file describing the architecture and network hyperparameters. The code can thus load the network automatically.
 
 | Name (link) | KPConv Type | Description | Score |
 |:-------------|:-------------:|:-----|:-----:|
 | [Light_KPFCNN](https://drive.google.com/file/d/14sz0hdObzsf_exxInXdOIbnUTe0foOOz/view?usp=sharing) | rigid | A network with small `in_radius` for light GPU consumption (~8GB) | 65.4% |
 | [Heavy_KPFCNN](https://drive.google.com/file/d/1ySQq3SRBgk2Vt5Bvj-0N7jDPi0QTPZiZ/view?usp=sharing) | rigid | A network with better performances but needing bigger GPU (>18GB). | 66.4% |
+| [Deform_KPFCNN](https://drive.google.com/file/d/1ObGr2Srfj0f7Bd3bBbuQzxtjf0ULbpSA/view?usp=sharing) | deform | A deformable convolution network needing a big GPU (>20GB). | 67.3% |
 
 
 
diff --git a/plot_convergence.py b/plot_convergence.py
index 3377c55..cf70650 100644
--- a/plot_convergence.py
+++ b/plot_convergence.py
@@ -47,6 +47,22 @@ from datasets.SemanticKitti import SemanticKittiDataset
 #       \***********************/
 #
 
+def listdir_str(path):
+
+    # listdir can sometimes return binary strings instead of decoded strings.
+    # This function ensures consistent behavior.
+
+    f_list = []
+    for f in listdir(path):
+        try:
+            f = f.decode()
+        except (UnicodeDecodeError, AttributeError):
+            pass
+        f_list.append(f)
+
+    return f_list
+
+
 def running_mean(signal, n, axis=0, stride=1):
 
     signal = np.array(signal)
@@ -148,7 +164,7 @@ def load_single_IoU(filename, n_parts):
 
 def load_snap_clouds(path, dataset, only_last=False):
 
-    cloud_folders = np.array([join(path, f) for f in listdir(path) if f.startswith('val_preds')])
+    cloud_folders = np.array([join(path, f) for f in listdir_str(path) if f.startswith('val_preds')])
     cloud_epochs = np.array([int(f.split('_')[-1]) for f in cloud_folders])
     epoch_order = np.argsort(cloud_epochs)
     cloud_epochs = cloud_epochs[epoch_order]
@@ -165,7 +181,7 @@ def load_snap_clouds(path, dataset, only_last=False):
                     Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32)
 
             else:
-                for f in listdir(cloud_folder):
+                for f in listdir_str(cloud_folder):
                     if f.endswith('.ply') and not f.endswith('sub.ply'):
                         data = read_ply(join(cloud_folder, f))
                         labels = data['class']
@@ -176,7 +192,7 @@ def load_snap_clouds(path, dataset, only_last=False):
 
         # Erase ply to save disk memory
        if c_i < len(cloud_folders) - 1:
-            for f in listdir(cloud_folder):
+            for f in listdir_str(cloud_folder):
                 if f.endswith('.ply'):
                     remove(join(cloud_folder, f))
 
@@ -221,7 +237,7 @@ def compare_trainings(list_of_paths, list_of_labels=None):
 
         print(path)
 
-        if ('val_IoUs.txt' in [f for f in listdir(path)]) or ('val_confs.txt' in [f for f in listdir(path)]):
+        if ('val_IoUs.txt' in [f for f in listdir_str(path)]) or ('val_confs.txt' in [f for f in listdir_str(path)]):
             config = Config()
             config.load(path)
         else:
@@ -704,7 +720,7 @@ def experiment_name_1():
     res_path = 'results'
 
     # Gather logs and sort by date
-    logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
+    logs = np.sort([join(res_path, l) for l in listdir_str(res_path) if start <= l <= end])
 
     # Give names to the logs (for plot legends)
     logs_names = ['name_log_1',
@@ -733,7 +749,7 @@ def experiment_name_2():
     res_path = 'results'
 
     # Gather logs and sort by date
-    logs = np.sort([join(res_path, l) for l in listdir(res_path) if start <= l <= end])
+    logs = np.sort([join(res_path, l) for l in listdir_str(res_path) if start <= l <= end])
 
     # Optionally add a specific log at a specific place in the log list
     logs = logs.astype('