loosen the tolerances for some sam tests (because of a recent pytorch upgrade)

Laurent 2024-09-09 14:58:32 +00:00 committed by Laureηt
parent 2395e666d5
commit fc3a74e766
2 changed files with 4 additions and 4 deletions
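For context, torch.allclose considers two tensors equal element-wise when |a - b| <= atol + rtol * |b|, so loosening either tolerance widens that bound. A minimal sketch of the check these tests rely on (the tensor values below are invented for illustration):

import torch

a = torch.tensor([1.000, 2.000, 3.000])
b = torch.tensor([1.002, 2.001, 3.004])

# torch.allclose checks, element-wise: |a - b| <= atol + rtol * |b|
atol, rtol = 4e-3, 1e-5
manual = bool(torch.all(torch.abs(a - b) <= atol + rtol * torch.abs(b)))

assert manual == torch.allclose(a, b, atol=atol, rtol=rtol)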


@@ -256,8 +256,8 @@ def test_predictor(
         atol=4e-3,
     )
     assert (
-        torch.abs(reference_high_res_mask_hq - refiners_high_res_mask_hq).flatten().sum() <= 1
-    )  # The diff on the logits above leads to an absolute diff of 1 pixel on the high res masks
+        torch.abs(reference_high_res_mask_hq - refiners_high_res_mask_hq).flatten().sum() <= 2
+    )  # The diff on the logits above leads to an absolute diff of 2 pixels on the high res masks
     assert torch.allclose(
         iou_predictions_np,
         torch.max(iou_predictions),
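The relaxed assertion above counts mismatched pixels directly rather than using torch.allclose: for binary masks, the sum of absolute element-wise differences is exactly the number of pixels that disagree. A small sketch of that pattern, with made-up masks (the real masks come from the SAM predictor):

import torch

reference_mask = torch.tensor([[0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])
candidate_mask = torch.tensor([[0.0, 1.0, 1.0], [0.0, 1.0, 1.0]])  # one flipped pixel

# For 0/1 masks, this sum is the count of differing pixels.
n_diff_pixels = torch.abs(reference_mask - candidate_mask).flatten().sum()
assert n_diff_pixels <= 2  # mirrors the bound relaxed from 1 to 2 above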


@@ -422,7 +422,7 @@ def test_predictor_single_output(
     assert torch.allclose(
         low_res_masks[0, 0, ...],
         torch.as_tensor(facebook_low_res_masks[0], device=sam_h_single_output.device),
-        atol=6e-3,  # see test_predictor_resized_single_output for more explanation
+        atol=5e-2,  # see test_predictor_resized_single_output for more explanation
     )
     assert isclose(scores[0].item(), facebook_scores[0].item(), abs_tol=1e-05)
@@ -497,7 +497,7 @@ def test_mask_encoder(
     dense_embeddings = sam_h.mask_encoder(mask_input)
     assert facebook_mask_input.shape == mask_input.shape
-    assert torch.allclose(dense_embeddings, fb_dense_embeddings, atol=1e-4, rtol=1e-4)
+    assert torch.allclose(dense_embeddings, fb_dense_embeddings, atol=1e-3)
 
 @no_grad()
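The last hunk raises atol tenfold and drops the explicit rtol, falling back to torch.allclose's default of 1e-5. For embedding values close to zero, the rtol * |b| term contributes almost nothing, so the absolute tolerance does nearly all the work; a rough sketch of that effect, with invented values:

import torch

a = torch.tensor([1e-4, -2e-4, 3e-4])
b = a + 5e-4  # small absolute drift, but large relative to values this tiny

# Near zero, rtol * |b| adds almost nothing, so the old bound rejects the drift:
assert not torch.allclose(a, b, atol=1e-4, rtol=1e-4)
# The new, larger atol absorbs it (rtol left at its 1e-5 default):
assert torch.allclose(a, b, atol=1e-3)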