import logging
from os import listdir
from os.path import splitext
from pathlib import Path

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset


class BasicDataset(Dataset):
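    """Segmentation dataset that pairs each image in `images_dir` with the mask in
    `masks_dir` sharing the same filename stem (plus `mask_suffix`), optionally
    rescaling both by `scale`."""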
    def __init__(self, images_dir: str, masks_dir: str, scale: float = 1.0, mask_suffix: str = ''):
        self.images_dir = Path(images_dir)
        self.masks_dir = Path(masks_dir)
        assert 0 < scale <= 1, 'Scale must be between 0 and 1'
        self.scale = scale
        self.mask_suffix = mask_suffix

        # Sample IDs are the image file stems (extension dropped, hidden files skipped)
        self.ids = [splitext(file)[0] for file in listdir(images_dir) if not file.startswith('.')]
        if not self.ids:
            raise RuntimeError(f'No input file found in {images_dir}, make sure you put your images there')
        logging.info(f'Creating dataset with {len(self.ids)} examples')

    def __len__(self):
        return len(self.ids)

    @staticmethod
    def preprocess(pil_img, scale, is_mask):
        w, h = pil_img.size
        newW, newH = int(scale * w), int(scale * h)
        assert newW > 0 and newH > 0, 'Scale is too small, resized images would have no pixel'
        # Nearest-neighbour resampling preserves mask label values; bicubic is used for images
        pil_img = pil_img.resize((newW, newH), resample=Image.NEAREST if is_mask else Image.BICUBIC)
        img_ndarray = np.asarray(pil_img)

        if not is_mask:
            # Convert images to channel-first (C, H, W) layout and normalise to [0, 1]
            if img_ndarray.ndim == 2:
                img_ndarray = img_ndarray[np.newaxis, ...]
            else:
                img_ndarray = img_ndarray.transpose((2, 0, 1))

            img_ndarray = img_ndarray / 255

        return img_ndarray

    @staticmethod
    def load(filename):
        # Dispatch on file extension; .npy/.npz files are assumed to hold a single
        # image array and .pt/.pth a CPU tensor, anything else is opened as an image
        ext = splitext(filename)[1]
        if ext in ['.npz', '.npy']:
            return Image.fromarray(np.load(filename))
        elif ext in ['.pt', '.pth']:
            return Image.fromarray(torch.load(filename).numpy())
        else:
            return Image.open(filename)

    def __getitem__(self, idx):
        name = self.ids[idx]
        mask_file = list(self.masks_dir.glob(name + self.mask_suffix + '.*'))
        img_file = list(self.images_dir.glob(name + '.*'))

        assert len(img_file) == 1, f'Either no image or multiple images found for the ID {name}: {img_file}'
        assert len(mask_file) == 1, f'Either no mask or multiple masks found for the ID {name}: {mask_file}'
        mask = self.load(mask_file[0])
        img = self.load(img_file[0])

        assert img.size == mask.size, \
            f'Image and mask {name} should be the same size, but are {img.size} and {mask.size}'

        img = self.preprocess(img, self.scale, is_mask=False)
        mask = self.preprocess(mask, self.scale, is_mask=True)

        return {
            'image': torch.as_tensor(img.copy()).float().contiguous(),
            'mask': torch.as_tensor(mask.copy()).long().contiguous()
        }


class CarvanaDataset(BasicDataset):
    def __init__(self, images_dir, masks_dir, scale=1):
        # Carvana masks share the image's stem with a '_mask' suffix appended
        super().__init__(images_dir, masks_dir, scale, mask_suffix='_mask')
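

# --- Usage sketch ---
# A minimal, illustrative example of wrapping the dataset in a DataLoader; the
# directory paths, scale, and loader settings below are placeholder assumptions.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    dataset = CarvanaDataset('./data/imgs', './data/masks', scale=0.5)
    loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
    batch = next(iter(loader))
    # 'image' is float32 with shape (B, C, H, W); for single-channel masks,
    # 'mask' is int64 with shape (B, H, W)
    print(batch['image'].shape, batch['mask'].shape)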