micro_sam.evaluation.evaluation

Evaluation functionality for segmentation predictions from micro_sam.evaluation.automatic_mask_generation and micro_sam.evaluation.inference.

  1"""Evaluation functionality for segmentation predictions from `micro_sam.evaluation.automatic_mask_generation`
  2and `micro_sam.evaluation.inference`.
  3"""
  4
  5import os
  6from glob import glob
  7from tqdm import tqdm
  8from pathlib import Path
  9from typing import List, Optional, Union
 10
 11import numpy as np
 12import pandas as pd
 13import imageio.v3 as imageio
 14from skimage.measure import label
 15
 16from elf.evaluation import mean_segmentation_accuracy
 17
 18
 19def _run_evaluation(gt_paths, prediction_paths, verbose=True):
 20    assert len(gt_paths) == len(prediction_paths)
 21    msas, sa50s, sa75s = [], [], []
 22
 23    for gt_path, pred_path in tqdm(
 24        zip(gt_paths, prediction_paths), desc="Evaluate predictions", total=len(gt_paths), disable=not verbose
 25    ):
 26        assert os.path.exists(gt_path), gt_path
 27        assert os.path.exists(pred_path), pred_path
 28
 29        gt = imageio.imread(gt_path)
 30        gt = label(gt)
 31        pred = imageio.imread(pred_path)
 32
 33        msa, scores = mean_segmentation_accuracy(pred, gt, return_accuracies=True)
 34        sa50, sa75 = scores[0], scores[5]
 35        msas.append(msa), sa50s.append(sa50), sa75s.append(sa75)
 36
 37    return msas, sa50s, sa75s
 38
 39
 40def run_evaluation(
 41    gt_paths: List[Union[os.PathLike, str]],
 42    prediction_paths: List[Union[os.PathLike, str]],
 43    save_path: Optional[Union[os.PathLike, str]] = None,
 44    verbose: bool = True,
 45) -> pd.DataFrame:
 46    """Run evaluation for instance segmentation predictions.
 47
 48    Args:
 49        gt_paths: The list of paths to ground-truth images.
 50        prediction_paths: The list of paths with the instance segmentations to evaluate.
 51        save_path: Optional path for saving the results.
 52        verbose: Whether to print the progress.
 53
 54    Returns:
 55        A DataFrame that contains the evaluation results.
 56    """
 57    assert len(gt_paths) == len(prediction_paths)
 58    # if a save_path is given and it already exists then just load it instead of running the eval
 59    if save_path is not None and os.path.exists(save_path):
 60        return pd.read_csv(save_path)
 61
 62    msas, sa50s, sa75s = _run_evaluation(gt_paths, prediction_paths, verbose=verbose)
 63
 64    results = pd.DataFrame.from_dict({
 65        "mSA": [np.mean(msas)], "SA50": [np.mean(sa50s)], "SA75": [np.mean(sa75s)],
 66    })
 67
 68    if save_path is not None:
 69        os.makedirs(Path(save_path).parent, exist_ok=True)
 70        results.to_csv(save_path, index=False)
 71
 72    return results
 73
 74
 75def run_evaluation_for_iterative_prompting(
 76    gt_paths: List[Union[os.PathLike, str]],
 77    prediction_root: Union[os.PathLike, str],
 78    experiment_folder: Union[os.PathLike, str],
 79    start_with_box_prompt: bool = False,
 80    overwrite_results: bool = False,
 81) -> pd.DataFrame:
 82    """Run evaluation for iterative prompt-based segmentation predictions.
 83
 84    Args:
 85        gt_paths: The list of paths to ground-truth images.
 86        prediction_root: The folder with the iterative prompt-based instance segmentations to evaluate.
 87        experiment_folder: The folder where all the experiment results are stored.
 88        start_with_box_prompt: Whether to evaluate on experiments with iterative prompting starting with box.
 89        overwrite_results: Whether to overwrite the results to update them with the new evaluation run.
 90
 91    Returns:
 92        A DataFrame that contains the evaluation results.
 93    """
 94    assert os.path.exists(prediction_root), prediction_root
 95
 96    # Save the results in the experiment folder
 97    result_folder = os.path.join(experiment_folder, "results")
 98    os.makedirs(result_folder, exist_ok=True)
 99
100    csv_path = os.path.join(
101        result_folder,
102        "iterative_prompts_start_box.csv" if start_with_box_prompt else "iterative_prompts_start_point.csv"
103    )
104
105    # Overwrite the previously saved results
106    if overwrite_results and os.path.exists(csv_path):
107        os.remove(csv_path)
108
    # If the results have already been computed, skip the evaluation and return the stored results.
    if os.path.exists(csv_path):
        print(f"Results with iterative prompting for interactive segmentation are already stored at '{csv_path}'.")
        return pd.read_csv(csv_path)

    list_of_results = []
    prediction_folders = sorted(glob(os.path.join(prediction_root, "iteration*")))
    for pred_folder in prediction_folders:
        print("Evaluating", os.path.split(pred_folder)[-1])
        pred_paths = sorted(glob(os.path.join(pred_folder, "*")))
        result = run_evaluation(gt_paths=gt_paths, prediction_paths=pred_paths, save_path=None)
        list_of_results.append(result)

    res_df = pd.concat(list_of_results, ignore_index=True)
    res_df.to_csv(csv_path, index=False)

    return res_df
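
Note on the metric indexing in _run_evaluation: scores[0] and scores[5] rely on the assumption that mean_segmentation_accuracy(..., return_accuracies=True) reports one accuracy per IoU threshold from 0.5 to 0.95 in steps of 0.05 (the common mean segmentation accuracy convention), so that index 0 corresponds to SA50 and index 5 to SA75. A small sketch of that assumed threshold layout:

    import numpy as np

    # Assumed threshold grid of the per-threshold accuracies returned by
    # mean_segmentation_accuracy(..., return_accuracies=True): 0.50, 0.55, ..., 0.95.
    iou_thresholds = np.arange(0.5, 1.0, 0.05)
    assert np.isclose(iou_thresholds[0], 0.50)  # scores[0] -> SA50
    assert np.isclose(iou_thresholds[5], 0.75)  # scores[5] -> SA75
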
def run_evaluation(gt_paths: List[Union[os.PathLike, str]], prediction_paths: List[Union[os.PathLike, str]], save_path: Optional[Union[os.PathLike, str]] = None, verbose: bool = True) -> pd.DataFrame:

Run evaluation for instance segmentation predictions.

Arguments:
  • gt_paths: The list of paths to ground-truth images.
  • prediction_paths: The list of paths with the instance segmentations to evaluate.
  • save_path: Optional path for saving the results.
  • verbose: Whether to print the progress.
Returns:
  A DataFrame that contains the evaluation results.
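
A minimal usage sketch (the folder layout and file patterns are hypothetical placeholders): collect matching ground-truth and prediction images, then run the evaluation and optionally cache the scores as a CSV via save_path.

    from glob import glob
    from micro_sam.evaluation.evaluation import run_evaluation

    # Hypothetical data layout; adjust the patterns so that ground-truth and
    # prediction files are listed in the same order.
    gt_paths = sorted(glob("data/ground_truth/*.tif"))
    prediction_paths = sorted(glob("data/predictions/*.tif"))

    # Returns a one-row DataFrame with mSA, SA50 and SA75 averaged over all images
    # and also stores it at the given save_path.
    results = run_evaluation(gt_paths, prediction_paths, save_path="results/evaluation.csv")
    print(results)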

def run_evaluation_for_iterative_prompting(gt_paths: List[Union[os.PathLike, str]], prediction_root: Union[os.PathLike, str], experiment_folder: Union[os.PathLike, str], start_with_box_prompt: bool = False, overwrite_results: bool = False) -> pd.DataFrame:

Run evaluation for iterative prompt-based segmentation predictions.

Arguments:
  • gt_paths: The list of paths to ground-truth images.
  • prediction_root: The folder with the iterative prompt-based instance segmentations to evaluate.
  • experiment_folder: The folder where all the experiment results are stored.
  • start_with_box_prompt: Whether to evaluate on experiments with iterative prompting starting with box.
  • overwrite_results: Whether to overwrite the results to update them with the new evaluation run.
Returns:
  A DataFrame that contains the evaluation results.
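
A minimal usage sketch (folder names are hypothetical placeholders): prediction_root is expected to contain one sub-folder per prompting iteration matching the pattern "iteration*" (e.g. iteration00, iteration01, ...), each holding one prediction per ground-truth image; the combined per-iteration results are written to the "results" sub-folder of experiment_folder.

    from glob import glob
    from micro_sam.evaluation.evaluation import run_evaluation_for_iterative_prompting

    gt_paths = sorted(glob("data/ground_truth/*.tif"))

    # Hypothetical experiment layout; start_with_box_prompt only controls
    # which CSV name is used for the stored results.
    results = run_evaluation_for_iterative_prompting(
        gt_paths=gt_paths,
        prediction_root="experiments/start_with_point",
        experiment_folder="experiments",
        start_with_box_prompt=False,
    )
    print(results)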