diff --git a/.devcontainer.tmp/docker-compose.yml b/.devcontainer.tmp/docker-compose.yml
index a9f8a21..ad7fd2a 100644
--- a/.devcontainer.tmp/docker-compose.yml
+++ b/.devcontainer.tmp/docker-compose.yml
@@ -19,6 +19,6 @@ services:
     container_name: wandb-local
     hostname: wandb-local
     volumes:
-      - /media/disk2/lfainsin/wandb-local/:/vol
+      - ./wandb-local/:/vol
    ports:
      - 8080:8080
diff --git a/src/data/dataloader.py b/src/data/dataloader.py
index f21dbd3..1ff1d36 100644
--- a/src/data/dataloader.py
+++ b/src/data/dataloader.py
@@ -25,9 +25,9 @@ class Spheres(pl.LightningDataModule):
         # )
 
         # dataset = SyntheticDataset(image_dir=wandb.config.DIR_TRAIN_IMG, transform=transform)
-        # dataset = Subset(dataset, list(range(0, len(dataset), len(dataset) // 10000 + 1)))
 
-        dataset = LabeledDataset2(image_dir="/home/lilian/data_disk/lfainsin/prerender/")
+        dataset = LabeledDataset2(image_dir="/media/disk1/lfainsin/TRAIN_prerender/")
+        dataset = Subset(dataset, list(range(len(dataset))))  # somehow this allows better GPU utilization
 
         return DataLoader(
             dataset,
@@ -40,6 +40,7 @@ class Spheres(pl.LightningDataModule):
 
     def val_dataloader(self):
         dataset = LabeledDataset(image_dir=wandb.config.DIR_VALID_IMG)
+        dataset = Subset(dataset, list(range(len(dataset))))  # somehow this allows better GPU utilization
 
         return DataLoader(
             dataset,
diff --git a/src/data/dataset.py b/src/data/dataset.py
index 439e284..455cc5d 100644
--- a/src/data/dataset.py
+++ b/src/data/dataset.py
@@ -81,18 +81,19 @@ class LabeledDataset(Dataset):
 
 class LabeledDataset2(Dataset):
     def __init__(self, image_dir):
-        self.images = list(Path(image_dir).glob("**/*.jpg"))
+        self.image_dir = Path(image_dir)
 
     def __len__(self):
-        return len(self.images)
+        return len(list(self.image_dir.iterdir()))
 
     def __getitem__(self, index):
+        path = self.image_dir / str(index)
+
         # open and convert image
-        image = np.array(Image.open(self.images[index]).convert("RGB"), dtype=np.uint8)
+        image = np.array(Image.open(path / "image.jpg").convert("RGB"), dtype=np.uint8)
 
         # open and convert mask
-        mask_path = self.images[index].parent.joinpath("MASK.PNG")
-        mask = np.array(Image.open(mask_path).convert("L"), dtype=np.uint8) // 255
+        mask = np.array(Image.open(path / "MASK.PNG").convert("L"), dtype=np.uint8) // 255
 
         # convert image & mask to Tensor float in [0, 1]
         post_process = A.Compose(
diff --git a/src/train.py b/src/train.py
index 7167634..63b14e5 100644
--- a/src/train.py
+++ b/src/train.py
@@ -48,12 +48,13 @@ if __name__ == "__main__":
         max_epochs=wandb.config.EPOCHS,
         accelerator=wandb.config.DEVICE,
         benchmark=wandb.config.BENCHMARK,
-        # profiler="simple",
         precision=16,
         logger=logger,
         log_every_n_steps=1,
         val_check_interval=100,
         callbacks=[RichProgressBar(), ArtifactLog(), TableLog()],
+        # profiler="simple",
+        # num_sanity_val_steps=0,
     )
 
     # actually train the model
diff --git a/wandb.yaml b/wandb.yaml
index 8bbba18..4746243 100644
--- a/wandb.yaml
+++ b/wandb.yaml
@@ -1,9 +1,9 @@
 DIR_TRAIN_IMG:
-  value: "/home/lilian/data_disk/lfainsin/train/"
+  value: "/media/disk1/lfainsin/BACKGROUND/"
 DIR_VALID_IMG:
-  value: "/home/lilian/data_disk/lfainsin/test_batched_fast/"
+  value: "/media/disk1/lfainsin/TEST_batched/"
 DIR_SPHERE:
-  value: "/home/lilian/data_disk/lfainsin/spheres+real/"
+  value: "/media/disk1/lfainsin/SPHERES/"
 
 FEATURES:
   value: [8, 16, 32, 64]
@@ -29,13 +29,13 @@ SPHERES:
   value: 3
 
 EPOCHS:
-  value: 20
+  value: 1
 TRAIN_BATCH_SIZE:
-  value: 64 # 100
+  value: 128 # 100
 VAL_BATCH_SIZE:
   value: 8 # 10
 PREFETCH_FACTOR:
-  value: 16
+  value: 2
 
 LEARNING_RATE:
   value: 1.0e-4
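
Note: the new LabeledDataset2 assumes a prerendered layout with one subfolder per sample, named by its integer index and holding an image.jpg plus a MASK.PNG. A minimal sketch of a sanity check for that assumed layout (the helper name and the loop are illustrative; only the root path and filenames come from the diff):

from pathlib import Path

def check_prerender_layout(root_dir: str) -> None:
    # expected layout: <root>/0/image.jpg, <root>/0/MASK.PNG, <root>/1/image.jpg, ...
    root = Path(root_dir)
    n_samples = len(list(root.iterdir()))  # mirrors LabeledDataset2.__len__
    for index in range(n_samples):
        sample = root / str(index)
        assert (sample / "image.jpg").is_file(), f"missing image for sample {index}"
        assert (sample / "MASK.PNG").is_file(), f"missing mask for sample {index}"

check_prerender_layout("/media/disk1/lfainsin/TRAIN_prerender/")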