micro_sam.evaluation.evaluation
Evaluation functionality for segmentation predictions from micro_sam.evaluation.automatic_mask_generation and micro_sam.evaluation.inference.
"""Evaluation functionality for segmentation predictions from `micro_sam.evaluation.automatic_mask_generation`
and `micro_sam.evaluation.inference`.
"""

import os
from glob import glob
from tqdm import tqdm
from pathlib import Path
from typing import List, Optional, Union

import numpy as np
import pandas as pd
import imageio.v3 as imageio
from skimage.measure import label

from elf.evaluation import mean_segmentation_accuracy


def _run_evaluation(gt_paths, prediction_paths, verbose=True):
    assert len(gt_paths) == len(prediction_paths)
    msas, sa50s, sa75s = [], [], []

    for gt_path, pred_path in tqdm(
        zip(gt_paths, prediction_paths), desc="Evaluate predictions", total=len(gt_paths), disable=not verbose
    ):
        assert os.path.exists(gt_path), gt_path
        assert os.path.exists(pred_path), pred_path

        gt = imageio.imread(gt_path)
        gt = label(gt)
        pred = imageio.imread(pred_path)

        msa, scores = mean_segmentation_accuracy(pred, gt, return_accuracies=True)
        sa50, sa75 = scores[0], scores[5]
        msas.append(msa), sa50s.append(sa50), sa75s.append(sa75)

    return msas, sa50s, sa75s


def run_evaluation(
    gt_paths: List[Union[os.PathLike, str]],
    prediction_paths: List[Union[os.PathLike, str]],
    save_path: Optional[Union[os.PathLike, str]] = None,
    verbose: bool = True,
) -> pd.DataFrame:
    """Run evaluation for instance segmentation predictions.

    Args:
        gt_paths: The list of paths to ground-truth images.
        prediction_paths: The list of paths with the instance segmentations to evaluate.
        save_path: Optional path for saving the results.
        verbose: Whether to print the progress.

    Returns:
        A DataFrame that contains the evaluation results.
    """
    assert len(gt_paths) == len(prediction_paths)
    # If a save_path is given and it already exists then just load it instead of running the eval.
    if save_path is not None and os.path.exists(save_path):
        return pd.read_csv(save_path)

    msas, sa50s, sa75s = _run_evaluation(gt_paths, prediction_paths, verbose=verbose)

    results = pd.DataFrame.from_dict({
        "mSA": [np.mean(msas)], "SA50": [np.mean(sa50s)], "SA75": [np.mean(sa75s)],
    })

    if save_path is not None:
        os.makedirs(Path(save_path).parent, exist_ok=True)
        results.to_csv(save_path, index=False)

    return results


def run_evaluation_for_iterative_prompting(
    gt_paths: List[Union[os.PathLike, str]],
    prediction_root: Union[os.PathLike, str],
    experiment_folder: Union[os.PathLike, str],
    start_with_box_prompt: bool = False,
    overwrite_results: bool = False,
    use_masks: bool = False,
) -> pd.DataFrame:
    """Run evaluation for iterative prompt-based segmentation predictions.

    Args:
        gt_paths: The list of paths to ground-truth images.
        prediction_root: The folder with the iterative prompt-based instance segmentations to evaluate.
        experiment_folder: The folder where all the experiment results are stored.
        start_with_box_prompt: Whether to evaluate on experiments with iterative prompting starting with box.
        overwrite_results: Whether to overwrite the results to update them with the new evaluation run.
        use_masks: Whether to use masks for iterative prompting.

    Returns:
        A DataFrame that contains the evaluation results.
    """
    assert os.path.exists(prediction_root), prediction_root

    # Save the results in the experiment folder.
    result_folder = os.path.join(
        experiment_folder, "results", "iterative_prompting_" + ("with" if use_masks else "without") + "_mask"
    )
    os.makedirs(result_folder, exist_ok=True)

    csv_path = os.path.join(
        result_folder,
        "iterative_prompts_start_box.csv" if start_with_box_prompt else "iterative_prompts_start_point.csv"
    )

    # Overwrite the previously saved results.
    if overwrite_results and os.path.exists(csv_path):
        os.remove(csv_path)

    # If the results have been computed already, it's not needed to re-run it again.
    if os.path.exists(csv_path):
        print(f"Results with iterative prompting for interactive segmentation are already stored at '{csv_path}'.")
        return

    list_of_results = []
    prediction_folders = sorted(glob(os.path.join(prediction_root, "iteration*")))
    for pred_folder in prediction_folders:
        print("Evaluating", os.path.split(pred_folder)[-1])
        pred_paths = sorted(glob(os.path.join(pred_folder, "*")))
        result = run_evaluation(gt_paths=gt_paths, prediction_paths=pred_paths, save_path=None)
        list_of_results.append(result)

    res_df = pd.concat(list_of_results, ignore_index=True)
    res_df.to_csv(csv_path)
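The private helper _run_evaluation builds on elf.evaluation.mean_segmentation_accuracy. The following is a small, self-contained sketch of the per-image computation on made-up toy masks (the arrays are illustrative only, not part of the library):

import numpy as np
from skimage.measure import label
from elf.evaluation import mean_segmentation_accuracy

# Toy ground truth with two objects; `label` assigns them instance ids 1 and 2,
# mirroring the relabeling done in `_run_evaluation` above.
gt = np.zeros((32, 32), dtype="uint32")
gt[2:10, 2:10] = 1
gt[20:30, 20:30] = 1
gt = label(gt)

# A toy prediction that recovers the first object exactly and misses the second one.
pred = np.zeros_like(gt)
pred[2:10, 2:10] = 1

# With return_accuracies=True the per-threshold segmentation accuracies are returned
# alongside their mean. Indices 0 and 5 are used as SA50 and SA75 in `_run_evaluation`,
# i.e. the accuracies at IoU thresholds 0.5 and 0.75.
msa, scores = mean_segmentation_accuracy(pred, gt, return_accuracies=True)
print("mSA:", msa, "SA50:", scores[0], "SA75:", scores[5])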
def run_evaluation(
    gt_paths: List[Union[os.PathLike, str]],
    prediction_paths: List[Union[os.PathLike, str]],
    save_path: Optional[Union[os.PathLike, str]] = None,
    verbose: bool = True,
) -> pd.DataFrame:
Run evaluation for instance segmentation predictions.
Arguments:
- gt_paths: The list of paths to ground-truth images.
- prediction_paths: The list of paths with the instance segmentations to evaluate.
- save_path: Optional path for saving the results.
- verbose: Whether to print the progress.
Returns:
A DataFrame that contains the evaluation results.
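A minimal usage sketch, assuming ground-truth and prediction images are stored as identically named TIFF files in two folders (the folder layout and file names here are hypothetical):

import os
from glob import glob

from micro_sam.evaluation.evaluation import run_evaluation

# Hypothetical folder layout; replace with your own ground-truth and prediction files.
# Both lists are sorted so that paths at the same index correspond to the same image.
gt_paths = sorted(glob(os.path.join("data", "ground_truth", "*.tif")))
prediction_paths = sorted(glob(os.path.join("data", "predictions", "*.tif")))

# Returns a one-row DataFrame with mSA, SA50 and SA75 averaged over all image pairs.
# If the CSV at save_path already exists, it is loaded instead of re-running the evaluation.
results = run_evaluation(gt_paths, prediction_paths, save_path="results/instance_segmentation.csv")
print(results)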
def run_evaluation_for_iterative_prompting(
    gt_paths: List[Union[os.PathLike, str]],
    prediction_root: Union[os.PathLike, str],
    experiment_folder: Union[os.PathLike, str],
    start_with_box_prompt: bool = False,
    overwrite_results: bool = False,
    use_masks: bool = False,
) -> pd.DataFrame:
Run evaluation for iterative prompt-based segmentation predictions.
Arguments:
- gt_paths: The list of paths to ground-truth images.
- prediction_root: The folder with the iterative prompt-based instance segmentations to evaluate.
- experiment_folder: The folder where all the experiment results are stored.
- start_with_box_prompt: Whether to evaluate on experiments with iterative prompting starting with box.
- overwrite_results: Whether to overwrite the results to update them with the new evaluation run.
- use_masks: Whether to use masks for iterative prompting.
Returns:
A DataFrame that contains the evaluation results.
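A minimal usage sketch for the iterative-prompting evaluation, assuming a hypothetical experiment layout in which prediction_root contains one sub-folder per prompting iteration (matching the "iteration*" pattern used in the source above):

import os
from glob import glob

import pandas as pd
from micro_sam.evaluation.evaluation import run_evaluation_for_iterative_prompting

# Hypothetical paths; replace with your own experiment layout.
# prediction_root is expected to contain sub-folders matching "iteration*".
gt_paths = sorted(glob(os.path.join("data", "ground_truth", "*.tif")))
prediction_root = "experiment/start_with_point"
experiment_folder = "experiment"

run_evaluation_for_iterative_prompting(
    gt_paths=gt_paths,
    prediction_root=prediction_root,
    experiment_folder=experiment_folder,
    start_with_box_prompt=False,  # evaluate runs that started from a point prompt
)

# The per-iteration results are written to a CSV inside the experiment folder.
csv_path = os.path.join(
    experiment_folder, "results", "iterative_prompting_without_mask", "iterative_prompts_start_point.csv"
)
print(pd.read_csv(csv_path))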