Mirror of https://github.com/Laurent2916/REVA-QCAV.git — synced 2024-11-09 15:02:03 +00:00
refactor: remove pycocotools
Former-commit-id: ebf5f3b85c152491ef299d4d2773c96b8ff3d394 [formerly d6923f15603da3302224199a0b8404c83b091bca] Former-commit-id: 1d6ecdd7fffe5bd7f77e2af3d0d4d9b8df48ae53
parent d8522b0b63
commit e1e74242de
@@ -1,194 +0,0 @@
import copy
import io
from contextlib import redirect_stdout

import numpy as np
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

import utils


# Wraps pycocotools COCOeval for one or more IoU types ("bbox", "segm", "keypoints").
class CocoEvaluator:
    def __init__(self, coco_gt, iou_types):
        if not isinstance(iou_types, (list, tuple)):
            raise TypeError(f"This constructor expects iou_types of type list or tuple, instead got {type(iou_types)}")
        coco_gt = copy.deepcopy(coco_gt)
        self.coco_gt = coco_gt

        self.iou_types = iou_types
        self.coco_eval = {}
        for iou_type in iou_types:
            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)

        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}

    def update(self, predictions):
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)

        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)
            with redirect_stdout(io.StringIO()):
                coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
            coco_eval = self.coco_eval[iou_type]

            coco_eval.cocoDt = coco_dt
            coco_eval.params.imgIds = list(img_ids)
            img_ids, eval_imgs = evaluate(coco_eval)

            self.eval_imgs[iou_type].append(eval_imgs)

    def synchronize_between_processes(self):
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])

    def accumulate(self):
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()

    def summarize(self):
        for iou_type, coco_eval in self.coco_eval.items():
            print(f"IoU metric: {iou_type}")
            coco_eval.summarize()

    def prepare(self, predictions, iou_type):
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        if iou_type == "segm":
            return self.prepare_for_coco_segmentation(predictions)
        if iou_type == "keypoints":
            return self.prepare_for_coco_keypoint(predictions)
        raise ValueError(f"Unknown iou type {iou_type}")

    def prepare_for_coco_detection(self, predictions):
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results

    def prepare_for_coco_segmentation(self, predictions):
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            scores = prediction["scores"]
            labels = prediction["labels"]
            masks = prediction["masks"]

            # binarize soft masks before RLE encoding
            masks = masks > 0.5

            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()

            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
                for mask in masks.cpu()
            ]
            for rle in rles:
                rle["counts"] = rle["counts"].decode("utf-8")

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "segmentation": rle,
                        "score": scores[k],
                    }
                    for k, rle in enumerate(rles)
                ]
            )
        return coco_results

    def prepare_for_coco_keypoint(self, predictions):
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            keypoints = prediction["keypoints"]
            keypoints = keypoints.flatten(start_dim=1).tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "keypoints": keypoint,
                        "score": scores[k],
                    }
                    for k, keypoint in enumerate(keypoints)
                ]
            )
        return coco_results


def convert_to_xywh(boxes):
    xmin, ymin, xmax, ymax = boxes.unbind(1)
    return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)


# Gather and deduplicate per-process evaluation results in distributed runs.
def merge(img_ids, eval_imgs):
    all_img_ids = utils.all_gather(img_ids)
    all_eval_imgs = utils.all_gather(eval_imgs)

    merged_img_ids = []
    for p in all_img_ids:
        merged_img_ids.extend(p)

    merged_eval_imgs = []
    for p in all_eval_imgs:
        merged_eval_imgs.append(p)

    merged_img_ids = np.array(merged_img_ids)
    merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)

    # keep only unique (and in sorted order) images
    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
    merged_eval_imgs = merged_eval_imgs[..., idx]

    return merged_img_ids, merged_eval_imgs


def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    img_ids, eval_imgs = merge(img_ids, eval_imgs)
    img_ids = list(img_ids)
    eval_imgs = list(eval_imgs.flatten())

    coco_eval.evalImgs = eval_imgs
    coco_eval.params.imgIds = img_ids
    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)


# Run COCOeval.evaluate() silently; reshape evalImgs to (categories, area ranges, images).
def evaluate(imgs):
    with redirect_stdout(io.StringIO()):
        imgs.evaluate()
    return imgs.params.imgIds, np.asarray(imgs.evalImgs).reshape(-1, len(imgs.params.areaRng), len(imgs.params.imgIds))
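For context, this is how the removed CocoEvaluator was typically driven from an evaluation loop. A minimal sketch, assuming a torchvision-style detection model and data loader; the names model, data_loader, and device are illustrative, not from this repository, and get_coco_api_from_dataset / get_iou_types come from the second removed file below.

# Hypothetical evaluation loop; model, data_loader, and device are assumed names.
coco = get_coco_api_from_dataset(data_loader.dataset)  # COCO-format ground truth
iou_types = get_iou_types(model)                       # e.g. ["bbox"] or ["bbox", "segm"]
evaluator = CocoEvaluator(coco, iou_types)

model.eval()
with torch.no_grad():
    for images, targets in data_loader:
        images = [img.to(device) for img in images]
        outputs = model(images)
        outputs = [{k: v.cpu() for k, v in o.items()} for o in outputs]
        # update() expects predictions keyed by image id
        res = {t["image_id"].item(): o for t, o in zip(targets, outputs)}
        evaluator.update(res)

evaluator.synchronize_between_processes()
evaluator.accumulate()
evaluator.summarize()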
@@ -1,231 +0,0 @@
import copy
import os

import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO


# Keep only annotations whose category is in `categories`, optionally remapping
# category ids to contiguous indices.
class FilterAndRemapCocoCategories:
    def __init__(self, categories, remap=True):
        self.categories = categories
        self.remap = remap

    def __call__(self, image, target):
        anno = target["annotations"]
        anno = [obj for obj in anno if obj["category_id"] in self.categories]
        if not self.remap:
            target["annotations"] = anno
            return image, target
        anno = copy.deepcopy(anno)
        for obj in anno:
            obj["category_id"] = self.categories.index(obj["category_id"])
        target["annotations"] = anno
        return image, target


# Decode COCO polygon segmentations into an (N, height, width) uint8 mask tensor.
def convert_coco_poly_to_mask(segmentations, height, width):
    masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        mask = coco_mask.decode(rles)
        if len(mask.shape) < 3:
            mask = mask[..., None]
        mask = torch.as_tensor(mask, dtype=torch.uint8)
        mask = mask.any(dim=2)
        masks.append(mask)
    if masks:
        masks = torch.stack(masks, dim=0)
    else:
        masks = torch.zeros((0, height, width), dtype=torch.uint8)
    return masks


class ConvertCocoPolysToMask:
    def __call__(self, image, target):
        w, h = image.size

        image_id = target["image_id"]
        image_id = torch.tensor([image_id])

        anno = target["annotations"]

        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        # guard against no boxes via resizing
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)

        classes = [obj["category_id"] for obj in anno]
        classes = torch.tensor(classes, dtype=torch.int64)

        segmentations = [obj["segmentation"] for obj in anno]
        masks = convert_coco_poly_to_mask(segmentations, h, w)

        keypoints = None
        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
            num_keypoints = keypoints.shape[0]
            if num_keypoints:
                keypoints = keypoints.view(num_keypoints, -1, 3)

        # drop degenerate boxes (zero or negative width/height)
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]
        classes = classes[keep]
        masks = masks[keep]
        if keypoints is not None:
            keypoints = keypoints[keep]

        target = {}
        target["boxes"] = boxes
        target["labels"] = classes
        target["masks"] = masks
        target["image_id"] = image_id
        if keypoints is not None:
            target["keypoints"] = keypoints

        # for conversion to coco api
        area = torch.tensor([obj["area"] for obj in anno])
        iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
        target["area"] = area
        target["iscrowd"] = iscrowd

        return image, target


def _coco_remove_images_without_annotations(dataset, cat_list=None):
    def _has_only_empty_bbox(anno):
        return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)

    def _count_visible_keypoints(anno):
        return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)

    min_keypoints_per_image = 10

    def _has_valid_annotation(anno):
        # if it's empty, there is no annotation
        if len(anno) == 0:
            return False
        # if all boxes have close to zero area, there is no annotation
        if _has_only_empty_bbox(anno):
            return False
        # keypoint tasks have slightly different criteria for considering
        # whether an annotation is valid
        if "keypoints" not in anno[0]:
            return True
        # for keypoint detection tasks, only consider valid images those
        # containing at least min_keypoints_per_image
        if _count_visible_keypoints(anno) >= min_keypoints_per_image:
            return True
        return False

    if not isinstance(dataset, torchvision.datasets.CocoDetection):
        raise TypeError(
            f"This function expects dataset of type torchvision.datasets.CocoDetection, instead got {type(dataset)}"
        )
    ids = []
    for ds_idx, img_id in enumerate(dataset.ids):
        ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
        anno = dataset.coco.loadAnns(ann_ids)
        if cat_list:
            anno = [obj for obj in anno if obj["category_id"] in cat_list]
        if _has_valid_annotation(anno):
            ids.append(ds_idx)

    dataset = torch.utils.data.Subset(dataset, ids)
    return dataset


def get_iou_types(model):
    model_without_ddp = model
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model_without_ddp = model.module
    iou_types = ["bbox"]
    if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
        iou_types.append("segm")
    if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
        iou_types.append("keypoints")
    return iou_types


def convert_to_coco_api(ds):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {"images": [], "categories": [], "annotations": []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict["id"] = image_id
        img_dict["height"] = img.shape[-2]
        img_dict["width"] = img.shape[-1]
        dataset["images"].append(img_dict)
        bboxes = targets["boxes"].clone()
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets["labels"].tolist()
        areas = targets["area"].tolist()
        iscrowd = targets["iscrowd"].tolist()
        if "masks" in targets:
            masks = targets["masks"]
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if "keypoints" in targets:
            keypoints = targets["keypoints"]
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann["image_id"] = image_id
            ann["bbox"] = bboxes[i]
            ann["category_id"] = labels[i]
            categories.add(labels[i])
            ann["area"] = areas[i]
            ann["iscrowd"] = iscrowd[i]
            ann["id"] = ann_id
            if "masks" in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if "keypoints" in targets:
                ann["keypoints"] = keypoints[i]
                ann["num_keypoints"] = sum(k != 0 for k in keypoints[i][2::3])
            dataset["annotations"].append(ann)
            ann_id += 1
    dataset["categories"] = [{"id": i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds


def get_coco_api_from_dataset(dataset):
    # unwrap up to 10 levels of Subset wrappers
    for _ in range(10):
        if isinstance(dataset, torchvision.datasets.CocoDetection):
            break
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
    if isinstance(dataset, torchvision.datasets.CocoDetection):
        return dataset.coco
    return convert_to_coco_api(dataset)


class CocoDetection(torchvision.datasets.CocoDetection):
    def __init__(self, img_folder, ann_file, transforms):
        super().__init__(img_folder, ann_file)
        self._transforms = transforms

    def __getitem__(self, idx):
        img, target = super().__getitem__(idx)
        image_id = self.ids[idx]
        target = dict(image_id=image_id, annotations=target)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target
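A minimal sketch of how these helpers fit together when building a dataset, assuming COCO-style images and annotations on disk; the paths and the Compose helper below are illustrative, not part of this repository.

# Hypothetical paths; Compose just chains (image, target) transforms.
class Compose:
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target

transforms = Compose([ConvertCocoPolysToMask()])
dataset = CocoDetection("data/images", "data/annotations.json", transforms=transforms)
dataset = _coco_remove_images_without_annotations(dataset)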
@@ -1,18 +0,0 @@
import torch


def dice_score(inputs, targets, smooth=1, logits=True):
    # pass logits=False if your model already applies a sigmoid or equivalent activation
    if logits:
        inputs = torch.sigmoid(inputs)

    # flatten label and prediction tensors
    inputs = inputs.view(-1)
    targets = targets.view(-1)

    intersection = (inputs * targets).sum()
    return (2.0 * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)


def dice_loss(inputs, targets, smooth=1, logits=True):
    return 1 - dice_score(inputs, targets, smooth, logits)
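For illustration, the intended call pattern; the shapes and values below are made up.

# Toy example: raw logits against a binary mask of the same shape.
logits = torch.randn(2, 1, 4, 4)                      # model output before sigmoid
targets = torch.randint(0, 2, (2, 1, 4, 4)).float()   # binary ground-truth mask
score = dice_score(logits, targets)                   # higher is better; 1.0 is perfect overlap
loss = dice_loss(logits, targets)                     # scalar tensor; lower is better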