micro_sam.multi_dimensional_segmentation

Multi-dimensional segmentation with segment anything.

  1"""Multi-dimensional segmentation with segment anything.
  2"""
  3
  4import os
  5import multiprocessing as mp
  6from concurrent import futures
  7from typing import Dict, List, Optional, Union, Tuple
  8
  9import networkx as nx
 10import numpy as np
 11import torch
 12from scipy.ndimage import binary_closing
 13from skimage.measure import label, regionprops
 14from skimage.segmentation import relabel_sequential
 15
 16import nifty
 17
 18import elf.segmentation as seg_utils
 19import elf.tracking.tracking_utils as track_utils
 20from elf.tracking.motile_tracking import recolor_segmentation
 21
 22from segment_anything.predictor import SamPredictor
 23
 24try:
 25    from napari.utils import progress as tqdm
 26except ImportError:
 27    from tqdm import tqdm
 28
 29try:
 30    from trackastra.model import Trackastra
 31    from trackastra.tracking import graph_to_napari_tracks
 32except ImportError:
 33    Trackastra = None
 34
 35from . import util
 36from .prompt_based_segmentation import segment_from_mask
 37from .instance_segmentation import AMGBase, mask_data_to_segmentation
 38
 39
 40PROJECTION_MODES = ("box", "mask", "points", "points_and_mask", "single_point")
 41
 42
 43def _validate_projection(projection):
 44    use_single_point = False
 45    if isinstance(projection, str):
 46        if projection == "mask":
 47            use_box, use_mask, use_points = True, True, False
 48        elif projection == "points":
 49            use_box, use_mask, use_points = False, False, True
 50        elif projection == "box":
 51            use_box, use_mask, use_points = True, False, False
 52        elif projection == "points_and_mask":
 53            use_box, use_mask, use_points = False, True, True
 54        elif projection == "single_point":
 55            use_box, use_mask, use_points = False, False, True
 56            use_single_point = True
 57        else:
 58            raise ValueError(
 59                "Choose projection method from 'mask' / 'points' / 'box' / 'points_and_mask' / 'single_point'. "
 60                f"You have passed the invalid option {projection}."
 61            )
 62    elif isinstance(projection, dict):
 63        assert len(projection.keys()) == 3, "There should be three parameters assigned for the projection method."
 64        use_box, use_mask, use_points = projection["use_box"], projection["use_mask"], projection["use_points"]
 65    else:
 66        raise ValueError(f"{projection} is not a supported projection method.")
 67    return use_box, use_mask, use_points, use_single_point
 68
 69
  70# Advanced stopping criteria.
 71# In practice these did not make a big difference, so we do not use this at the moment.
 72# We still leave it here for reference.
 73def _advanced_stopping_criteria(
 74    z, seg_z, seg_prev, z_start, z_increment, segmentation, criterion_choice, score, increment
 75):
 76    def _compute_mean_iou_for_n_slices(z, increment, seg_z, n_slices):
 77        iou_list = [
 78            util.compute_iou(segmentation[z - increment * _slice], seg_z) for _slice in range(1, n_slices+1)
 79        ]
 80        return np.mean(iou_list)
 81
 82    if criterion_choice == 1:
 83        # 1. current metric: iou of current segmentation and the previous slice
 84        iou = util.compute_iou(seg_prev, seg_z)
 85        criterion = iou
 86
 87    elif criterion_choice == 2:
 88        # 2. combining SAM iou + iou: curr. slice & first segmented slice + iou: curr. slice vs prev. slice
 89        iou = util.compute_iou(seg_prev, seg_z)
 90        ff_iou = util.compute_iou(segmentation[z_start], seg_z)
 91        criterion = 0.5 * iou + 0.3 * score + 0.2 * ff_iou
 92
 93    elif criterion_choice == 3:
 94        # 3. iou of current segmented slice w.r.t the previous n slices
 95        criterion = _compute_mean_iou_for_n_slices(z, increment, seg_z, min(5, abs(z - z_start)))
 96
 97    return criterion
 98
 99
100def segment_mask_in_volume(
101    segmentation: np.ndarray,
102    predictor: SamPredictor,
103    image_embeddings: util.ImageEmbeddings,
104    segmented_slices: np.ndarray,
105    stop_lower: bool,
106    stop_upper: bool,
107    iou_threshold: float,
108    projection: Union[str, dict],
109    update_progress: Optional[callable] = None,
110    box_extension: float = 0.0,
111    verbose: bool = False,
112) -> Tuple[np.ndarray, Tuple[int, int]]:
113    """Segment an object mask in in volumetric data.
114
115    Args:
116        segmentation: The initial segmentation for the object.
117        predictor: The segment anything predictor.
118        image_embeddings: The precomputed image embeddings for the volume.
119        segmented_slices: List of slices for which this object has already been segmented.
120        stop_lower: Whether to stop at the lowest segmented slice.
121        stop_upper: Whether to stop at the topmost segmented slice.
122        iou_threshold: The IOU threshold for continuing segmentation across 3d.
123        projection: The projection method to use. One of 'box', 'mask', 'points', 'points_and_mask' or 'single_point'.
124            Pass a dictionary to choose the exact combination of projection modes.
125        update_progress: Callback to update an external progress bar.
126        box_extension: Extension factor for increasing the box size after projection.
127        verbose: Whether to print details about the segmentation steps.
128
129    Returns:
130        Array with the volumetric segmentation.
131        Tuple with the first and last segmented slice.
132    """
133    use_box, use_mask, use_points, use_single_point = _validate_projection(projection)
134
135    if update_progress is None:
136        def update_progress(*args):
137            pass
138
139    def segment_range(z_start, z_stop, increment, stopping_criterion, threshold=None, verbose=False):
140        z = z_start + increment
141        while True:
142            if verbose:
143                print(f"Segment {z_start} to {z_stop}: segmenting slice {z}")
144            seg_prev = segmentation[z - increment]
145            seg_z, score, _ = segment_from_mask(
146                predictor, seg_prev, image_embeddings=image_embeddings, i=z, use_mask=use_mask,
147                use_box=use_box, use_points=use_points, box_extension=box_extension, return_all=True,
148                use_single_point=use_single_point,
149            )
150            if threshold is not None:
151                iou = util.compute_iou(seg_prev, seg_z)
152                if iou < threshold:
153                    if verbose:
154                        msg = f"Segmentation stopped at slice {z} due to IOU {iou} < {threshold}."
155                        print(msg)
156                    break
157
158            segmentation[z] = seg_z
159            z += increment
160            if stopping_criterion(z, z_stop):
161                if verbose:
162                    print(f"Segment {z_start} to {z_stop}: stop at slice {z}")
163                break
164            update_progress(1)
165
166        return z - increment
167
168    z0, z1 = int(segmented_slices.min()), int(segmented_slices.max())
169
170    # segment below the min slice
171    if z0 > 0 and not stop_lower:
172        z_min = segment_range(z0, 0, -1, np.less, iou_threshold, verbose=verbose)
173    else:
174        z_min = z0
175
176    # segment above the max slice
177    if z1 < segmentation.shape[0] - 1 and not stop_upper:
178        z_max = segment_range(z1, segmentation.shape[0] - 1, 1, np.greater, iou_threshold, verbose=verbose)
179    else:
180        z_max = z1
181
182    # segment in between min and max slice
183    if z0 != z1:
184        for z_start, z_stop in zip(segmented_slices[:-1], segmented_slices[1:]):
185            slice_diff = z_stop - z_start
186            z_mid = int((z_start + z_stop) // 2)
187
188            if slice_diff == 1:  # the slices are adjacent -> we don't need to do anything
189                pass
190
191            elif z_start == z0 and stop_lower:  # the lower slice is stop: we just segment from upper
192                segment_range(z_stop, z_start, -1, np.less_equal, verbose=verbose)
193
194            elif z_stop == z1 and stop_upper:  # the upper slice is stop: we just segment from lower
195                segment_range(z_start, z_stop, 1, np.greater_equal, verbose=verbose)
196
197            elif slice_diff == 2:  # there is only one slice in between -> use combined mask
198                z = z_start + 1
199                seg_prompt = np.logical_or(segmentation[z_start] == 1, segmentation[z_stop] == 1)
200                segmentation[z] = segment_from_mask(
201                    predictor, seg_prompt, image_embeddings=image_embeddings, i=z,
202                    use_mask=use_mask, use_box=use_box, use_points=use_points,
203                    box_extension=box_extension
204                )
205                update_progress(1)
206
207            else:  # there is a range of more than 2 slices in between -> segment ranges
208                # segment from bottom
209                segment_range(
210                    z_start, z_mid, 1, np.greater_equal if slice_diff % 2 == 0 else np.greater, verbose=verbose
211                )
212                # segment from top
213                segment_range(z_stop, z_mid, -1, np.less_equal, verbose=verbose)
214                # if the difference between start and stop is even,
215                # then we have a slice in the middle that is the same distance from top and bottom;
216                # in this case the slice is not segmented in the ranges above, and we segment it
217                # using the combined mask from the adjacent top and bottom slice as prompt
218                if slice_diff % 2 == 0:
219                    seg_prompt = np.logical_or(segmentation[z_mid - 1] == 1, segmentation[z_mid + 1] == 1)
220                    segmentation[z_mid] = segment_from_mask(
221                        predictor, seg_prompt, image_embeddings=image_embeddings, i=z_mid,
222                        use_mask=use_mask, use_box=use_box, use_points=use_points,
223                        box_extension=box_extension
224                    )
225                    update_progress(1)
226
227    return segmentation, (z_min, z_max)
228
229
230def _preprocess_closing(slice_segmentation, gap_closing, pbar_update):
231    binarized = slice_segmentation > 0
232    # Use a structuring element that only closes elements in z, to avoid merging objects in-plane.
233    structuring_element = np.zeros((3, 1, 1))
234    structuring_element[:, 0, 0] = 1
235    closed_segmentation = binary_closing(binarized, iterations=gap_closing, structure=structuring_element)
236
237    new_segmentation = np.zeros_like(slice_segmentation)
238    n_slices = new_segmentation.shape[0]
239
240    def process_slice(z, offset):
241        seg_z = slice_segmentation[z]
242
243        # Closing does not work for the first and last gap slices
244        if z < gap_closing or z >= (n_slices - gap_closing):
245            seg_z, _, _ = relabel_sequential(seg_z, offset=offset)
246            offset = int(seg_z.max()) + 1
247            return seg_z, offset
248
249        # Apply connected components to the closed segmentation.
250        closed_z = label(closed_segmentation[z])
251
252        # Map objects in the closed and initial segmentation.
253        # We take objects from the closed segmentation unless they
254        # have overlap with more than one object from the initial segmentation.
255        # This indicates wrong merging of close-by objects, which we want to prevent.
256        matches = nifty.ground_truth.overlap(closed_z, seg_z)
257        matches = {
258            seg_id: matches.overlapArrays(seg_id, sorted=False)[0] for seg_id in range(1, int(closed_z.max() + 1))
259        }
260        matches = {k: v[v != 0] for k, v in matches.items()}
261
262        ids_initial, ids_closed = [], []
263        for seg_id, matched in matches.items():
264            if len(matched) > 1:
265                ids_initial.extend(matched.tolist())
266            else:
267                ids_closed.append(seg_id)
268
269        seg_new = np.zeros_like(seg_z)
270        closed_mask = np.isin(closed_z, ids_closed)
271        seg_new[closed_mask] = closed_z[closed_mask]
272
273        if ids_initial:
274            initial_mask = np.isin(seg_z, ids_initial)
275            seg_new[initial_mask] = relabel_sequential(seg_z[initial_mask], offset=seg_new.max() + 1)[0]
276
277        seg_new, _, _ = relabel_sequential(seg_new, offset=offset)
278        max_z = seg_new.max()
279        if max_z > 0:
280            offset = int(max_z) + 1
281
282        return seg_new, offset
283
284    # Further optimization: parallelize
285    offset = 1
286    for z in range(n_slices):
287        new_segmentation[z], offset = process_slice(z, offset)
288        pbar_update(1)
289
290    return new_segmentation
291
292
293def merge_instance_segmentation_3d(
294    slice_segmentation: np.ndarray,
295    beta: float = 0.5,
296    with_background: bool = True,
297    gap_closing: Optional[int] = None,
298    min_z_extent: Optional[int] = None,
299    verbose: bool = True,
300    pbar_init: Optional[callable] = None,
301    pbar_update: Optional[callable] = None,
302) -> np.ndarray:
303    """Merge stacked 2d instance segmentations into a consistent 3d segmentation.
304
305    Solves a multicut problem based on the overlap of objects to merge across z.
306
307    Args:
308        slice_segmentation: The stacked segmentation across the slices.
309            We assume that the segmentation is labeled consecutively across z.
310        beta: The bias term for the multicut. Higher values lead to a larger
311            degree of over-segmentation and vice versa.
312        with_background: Whether this is a segmentation problem with background.
313            In that case all edges connecting to the background are set to be repulsive.
314        gap_closing: If given, gaps in the segmentation are closed with a binary closing
315            operation. The value is used to determine the number of iterations for the closing.
316        min_z_extent: Require a minimal extent in z for the segmented objects.
317            This can help to prevent segmentation artifacts.
318        verbose: Verbosity flag.
319        pbar_init: Callback to initialize an external progress bar. Must accept number of steps and description.
320            Can be used together with pbar_update to handle the napari progress bar in another thread.
321            This enables using the function within a thread worker.
322        pbar_update: Callback to update an external progress bar.
323
324    Returns:
325        The merged segmentation.
326    """
327    _, pbar_init, pbar_update, pbar_close = util.handle_pbar(verbose, pbar_init, pbar_update)
328
329    if gap_closing is not None and gap_closing > 0:
330        pbar_init(slice_segmentation.shape[0] + 1, "Merge segmentation")
331        slice_segmentation = _preprocess_closing(slice_segmentation, gap_closing, pbar_update)
332    else:
333        pbar_init(1, "Merge segmentation")
334
335    # Extract the overlap between slices.
336    edges = track_utils.compute_edges_from_overlap(slice_segmentation, verbose=False)
337
338    uv_ids = np.array([[edge["source"], edge["target"]] for edge in edges])
339    overlaps = np.array([edge["score"] for edge in edges])
340
341    n_nodes = int(slice_segmentation.max() + 1)
342    graph = nifty.graph.undirectedGraph(n_nodes)
343    graph.insertEdges(uv_ids)
344
345    costs = seg_utils.multicut.compute_edge_costs(overlaps)
346    # set background weights to be maximally repulsive
347    if with_background:
348        bg_edges = (uv_ids == 0).any(axis=1)
349        costs[bg_edges] = -8.0
350
351    node_labels = seg_utils.multicut.multicut_decomposition(graph, 1.0 - costs, beta=beta)
352
353    segmentation = nifty.tools.take(node_labels, slice_segmentation)
354
355    if min_z_extent is not None and min_z_extent > 0:
356        props = regionprops(segmentation)
357        filter_ids = []
358        for prop in props:
359            box = prop.bbox
360            z_extent = box[3] - box[0]
361            if z_extent < min_z_extent:
362                filter_ids.append(prop.label)
363        if filter_ids:
364            segmentation[np.isin(segmentation, filter_ids)] = 0
365
366    pbar_update(1)
367    pbar_close()
368
369    return segmentation
370
371
372def _segment_slices(
373    data, predictor, segmentor, embedding_path, verbose, tile_shape, halo, with_background=True, batch_size=1, **kwargs
374):
375    assert data.ndim == 3
376
377    min_object_size = kwargs.pop("min_object_size", 0)
378    image_embeddings = util.precompute_image_embeddings(
379        predictor=predictor,
380        input_=data,
381        save_path=embedding_path,
382        ndim=3,
383        tile_shape=tile_shape,
384        halo=halo,
385        verbose=verbose,
386        batch_size=batch_size,
387    )
388
389    offset = 0
390    segmentation = np.zeros(data.shape, dtype="uint32")
391
392    for i in tqdm(range(segmentation.shape[0]), desc="Segment slices", disable=not verbose):
393        segmentor.initialize(data[i], image_embeddings=image_embeddings, verbose=False, i=i)
394        seg = segmentor.generate(**kwargs)
395
396        if isinstance(seg, list) and len(seg) == 0:
397            continue
398        else:
399            if isinstance(seg, list):
400                seg = mask_data_to_segmentation(
401                    seg, with_background=with_background, min_object_size=min_object_size
402                )
403
404            # Apply the offset so that instance ids are unique across slices.
405            max_z = seg.max()
406            if max_z == 0:
407                continue
408            seg[seg != 0] += offset
409            offset = max_z + offset
410
411        segmentation[i] = seg
412
413    return segmentation, image_embeddings
414
415
416def automatic_3d_segmentation(
417    volume: np.ndarray,
418    predictor: SamPredictor,
419    segmentor: AMGBase,
420    embedding_path: Optional[Union[str, os.PathLike]] = None,
421    with_background: bool = True,
422    gap_closing: Optional[int] = None,
423    min_z_extent: Optional[int] = None,
424    tile_shape: Optional[Tuple[int, int]] = None,
425    halo: Optional[Tuple[int, int]] = None,
426    verbose: bool = True,
427    return_embeddings: bool = False,
428    batch_size: int = 1,
429    **kwargs,
430) -> np.ndarray:
431    """Automatically segment objects in a volume.
432
433    First segments slices individually in 2d and then merges them across 3d
434    based on overlap of objects between slices.
435
436    Args:
437        volume: The input volume.
438        predictor: The SAM model.
439        segmentor: The instance segmentation class.
440        embedding_path: The path to save pre-computed embeddings.
441        with_background: Whether the segmentation has background.
442        gap_closing: If given, gaps in the segmentation are closed with a binary closing
443            operation. The value is used to determine the number of iterations for the closing.
444        min_z_extent: Require a minimal extent in z for the segmented objects.
445            This can help to prevent segmentation artifacts.
446        tile_shape: Shape of the tiles for tiled prediction. By default prediction is run without tiling.
447        halo: Overlap of the tiles for tiled prediction.
448        verbose: Verbosity flag.
449        return_embeddings: Whether to return the precomputed image embeddings.
450        batch_size: The batch size to compute image embeddings over planes.
451        kwargs: Keyword arguments for the 'generate' method of the 'segmentor'.
452
453    Returns:
454        The segmentation.
455    """
456    segmentation, image_embeddings = _segment_slices(
457        volume, predictor, segmentor, embedding_path, verbose,
458        tile_shape=tile_shape, halo=halo, with_background=with_background, **kwargs
459    )
460    segmentation = merge_instance_segmentation_3d(
461        segmentation,
462        beta=0.5,
463        with_background=with_background,
464        gap_closing=gap_closing,
465        min_z_extent=min_z_extent,
466        verbose=verbose,
467    )
468    if return_embeddings:
469        return segmentation, image_embeddings
470    else:
471        return segmentation
472
473
474def _filter_tracks(tracking_result, min_track_length):
475    props = regionprops(tracking_result)
476    discard_ids = []
477    for prop in props:
478        label_id = prop.label
479        z_start, z_stop = prop.bbox[0], prop.bbox[3]
480        if z_stop - z_start < min_track_length:
481            discard_ids.append(label_id)
482    tracking_result[np.isin(tracking_result, discard_ids)] = 0
483    tracking_result, _, _ = relabel_sequential(tracking_result)
484    return tracking_result
485
486
487def _extract_tracks_and_lineages(segmentations, track_data, parent_graph):
488    # The track data has the following layout: n_tracks x 4
489    # With the following columns:
490    # track_id - id of the track (= result from trackastra)
491    # timepoint
492    # y coordinate
493    # x coordinate
494
495    # Use the last three columns to index the segmentation and get the segmentation id.
496    index = np.round(track_data[:, 1:], 0).astype("int32")
497    index = tuple(index[:, i] for i in range(index.shape[1]))
498    segmentation_ids = segmentations[index]
499
500    # Find the mapping of nodes (= segmented objects) to track-ids.
501    track_ids = track_data[:, 0].astype("int32")
502    assert len(segmentation_ids) == len(track_ids)
503    node_to_track = {k: v for k, v in zip(segmentation_ids, track_ids)}
504
505    # Find the lineages as connected components in the parent graph.
506    # First, we build a proper graph.
507    lineage_graph = nx.Graph()
508    for k, v in parent_graph.items():
509        lineage_graph.add_edge(k, v)
510
511    # Then, find the connected components, and compute the lineage representation expected by micro-sam from it:
512    # E.g. if we have three lineages, the first consisting of three tracks and the second and third of one track each:
513    # [
514    #   {1: [2, 3]},  lineage with a dividing cell
515    #   {4: []}, lineage with just one cell
516    #   {5: []}, lineage with just one cell
517    # ]
518
519    # First, we fill the lineages which have one or more divisions, i.e. trees with more than one node.
520    lineages = []
521    for component in nx.connected_components(lineage_graph):
522        root = next(iter(component))
523        lineage_dict = {}
524
525        def dfs(node, parent):
526            # Avoid revisiting the parent node
527            children = [n for n in lineage_graph[node] if n != parent]
528            lineage_dict[node] = children
529            for child in children:
530                dfs(child, node)
531
532        dfs(root, None)
533        lineages.append(lineage_dict)
534
535    # Then add single node lineages, which are not reflected in the original graph.
536    all_tracks = set(track_ids.tolist())
537    lineage_tracks = []
538    for lineage in lineages:
539        for k, v in lineage.items():
540            lineage_tracks.append(k)
541            lineage_tracks.extend(v)
542    singleton_tracks = list(all_tracks - set(lineage_tracks))
543    lineages.extend([{track: []} for track in singleton_tracks])
544
545    # Make sure node_to_track contains everything.
546    all_seg_ids = np.unique(segmentations)
547    missing_seg_ids = np.setdiff1d(all_seg_ids, list(node_to_track.keys()))
548    node_to_track.update({seg_id: 0 for seg_id in missing_seg_ids})
549    return node_to_track, lineages
550
551
552def _filter_lineages(lineages, tracking_result):
553    track_ids = set(np.unique(tracking_result)) - {0}
554    filtered_lineages = []
555    for lineage in lineages:
556        filtered_lineage = {k: v for k, v in lineage.items() if k in track_ids}
557        if filtered_lineage:
558            filtered_lineages.append(filtered_lineage)
559    return filtered_lineages
560
561
562def _tracking_impl(timeseries, segmentation, mode, min_time_extent):
563    device = "cuda" if torch.cuda.is_available() else "cpu"
564    model = Trackastra.from_pretrained("general_2d", device=device)
565    lineage_graph = model.track(timeseries, segmentation, mode=mode)
566    track_data, parent_graph, _ = graph_to_napari_tracks(lineage_graph)
567    node_to_track, lineages = _extract_tracks_and_lineages(segmentation, track_data, parent_graph)
568    tracking_result = recolor_segmentation(segmentation, node_to_track)
569
570    # TODO
571    # We should check if trackastra supports this already.
572    # Filter out short tracks / lineages.
573    if min_time_extent is not None and min_time_extent > 0:
574        raise NotImplementedError
575
576    # Filter out pruned lineages.
577    # They may either be missing due to track filtering or non-consecutive track numbering in trackastra.
578    lineages = _filter_lineages(lineages, tracking_result)
579
580    return tracking_result, lineages
581
582
583def track_across_frames(
584    timeseries: np.ndarray,
585    segmentation: np.ndarray,
586    gap_closing: Optional[int] = None,
587    min_time_extent: Optional[int] = None,
588    verbose: bool = True,
589    pbar_init: Optional[callable] = None,
590    pbar_update: Optional[callable] = None,
591) -> Tuple[np.ndarray, List[Dict]]:
592    """Track segmented objects over time.
593
594    This function uses Trackastra: https://www.ecva.net/papers/eccv_2024/papers_ECCV/papers/09819.pdf
595    for tracking. Please cite it if you use the automated tracking functionality.
596
597    Args:
598        timeseries: The input timeseries of images.
599        segmentation: The segmentation. Expects segmentation results per frame
600            that are relabeled so that segmentation ids don't overlap.
601        gap_closing: If given, gaps in the segmentation are closed with a binary closing
602            operation. The value is used to determine the number of iterations for the closing.
603        min_time_extent: Require a minimal extent in time for the tracked objects.
604        verbose: Verbosity flag.
605        pbar_init: Function to initialize the progress bar.
606        pbar_update: Function to update the progress bar.
607
608    Returns:
609        The tracking result. Each object is colored by its track id.
610        The lineages, which correspond to the cell divisions. Lineages are represented by a list of dicts,
611            with each dict encoding a lineage, where keys correspond to parent track ids.
612            Each key either maps to a list with two child track ids (cell division) or to an empty list (no division).
613    """
614    _, pbar_init, pbar_update, pbar_close = util.handle_pbar(verbose, pbar_init=pbar_init, pbar_update=pbar_update)
615
616    if gap_closing is not None and gap_closing > 0:
617        segmentation = _preprocess_closing(segmentation, gap_closing, pbar_update)
618
619    segmentation, lineage = _tracking_impl(
620        np.asarray(timeseries), segmentation, mode="greedy", min_time_extent=min_time_extent
621    )
622    return segmentation, lineage
623
624
625def automatic_tracking(
626    timeseries: np.ndarray,
627    predictor: SamPredictor,
628    segmentor: AMGBase,
629    embedding_path: Optional[Union[str, os.PathLike]] = None,
630    gap_closing: Optional[int] = None,
631    min_time_extent: Optional[int] = None,
632    tile_shape: Optional[Tuple[int, int]] = None,
633    halo: Optional[Tuple[int, int]] = None,
634    verbose: bool = True,
635    **kwargs,
636) -> Tuple[np.ndarray, List[Dict]]:
637    """Automatically track objects in a timesries based on per-frame automatic segmentation.
638
639    This function uses Trackastra: https://www.ecva.net/papers/eccv_2024/papers_ECCV/papers/09819.pdf
640    for tracking. Please cite it if you use the automated tracking functionality.
641
642    Args:
643        timeseries: The input timeseries of images.
644        predictor: The SAM model.
645        segmentor: The instance segmentation class.
646        embedding_path: The path to save pre-computed embeddings.
647        gap_closing: If given, gaps in the segmentation are closed with a binary closing
648            operation. The value is used to determine the number of iterations for the closing.
649        min_time_extent: Require a minimal extent in time for the tracked objects.
650        tile_shape: Shape of the tiles for tiled prediction. By default prediction is run without tiling.
651        halo: Overlap of the tiles for tiled prediction.
652        verbose: Verbosity flag.
653        kwargs: Keyword arguments for the 'generate' method of the 'segmentor'.
654
655    Returns:
656        The tracking result. Each object is colored by its track id.
657        The lineages, which correspond to the cell divisions. Lineages are represented by a list of dicts,
658            with each dict encoding a lineage, where keys correspond to parent track ids.
659            Each key either maps to a list with two child track ids (cell division) or to an empty list (no division).
660    """
661    if Trackastra is None:
662        raise RuntimeError(
663            "Automatic tracking requires trackastra. You can install it via 'pip install trackastra'."
664        )
665    segmentation, _ = _segment_slices(
666        timeseries, predictor, segmentor, embedding_path, verbose,
667        tile_shape=tile_shape, halo=halo,
668        **kwargs,
669    )
670    segmentation, lineage = track_across_frames(
671        timeseries, segmentation, gap_closing=gap_closing, min_time_extent=min_time_extent, verbose=verbose,
672    )
673    return segmentation, lineage
674
675
676def get_napari_track_data(
677    segmentation: np.ndarray, lineages: List[Dict], n_threads: Optional[int] = None
678) -> Tuple[np.ndarray, Dict[int, List]]:
679    """Derive the inputs for the napari tracking layer from a tracking result.
680
681    Args:
682        segmentation: The segmentation, after relabeling with track ids.
683        lineages: The lineage information.
684        n_threads: Number of threads for extracting the track data from the segmentation.
685
686    Returns:
687        The array with the track data expected by napari.
688        The parent dictionary for napari.
689    """
690    if n_threads is None:
691        n_threads = mp.cpu_count()
692
693    def compute_props(t):
694        props = regionprops(segmentation[t])
695        # Create the track data representation for napari, which expects:
696        # track_id, timepoint, y, x
697        track_data = np.array([[prop.label, t] + list(prop.centroid) for prop in props])
698        return track_data
699
700    with futures.ThreadPoolExecutor(n_threads) as tp:
701        track_data = list(tp.map(compute_props, range(segmentation.shape[0])))
702    track_data = [data for data in track_data if data.size > 0]
703    track_data = np.concatenate(track_data)
704
705    # The graph representation of napari uses the children as keys and the parents as values,
706    # whereas our representation uses parents as keys and children as values.
707    # Hence, we need to translate the representation.
708    parent_graph = {
709        child: [parent] for lineage in lineages for parent, children in lineage.items() for child in children
710    }
711
712    return track_data, parent_graph
PROJECTION_MODES = ('box', 'mask', 'points', 'points_and_mask', 'single_point')
def segment_mask_in_volume( segmentation: numpy.ndarray, predictor: segment_anything.predictor.SamPredictor, image_embeddings: Dict[str, Any], segmented_slices: numpy.ndarray, stop_lower: bool, stop_upper: bool, iou_threshold: float, projection: Union[str, dict], update_progress: Optional[callable] = None, box_extension: float = 0.0, verbose: bool = False) -> Tuple[numpy.ndarray, Tuple[int, int]]:

Segment an object mask in volumetric data.

Arguments:
  • segmentation: The initial segmentation for the object.
  • predictor: The segment anything predictor.
  • image_embeddings: The precomputed image embeddings for the volume.
  • segmented_slices: List of slices for which this object has already been segmented.
  • stop_lower: Whether to stop at the lowest segmented slice.
  • stop_upper: Whether to stop at the topmost segmented slice.
  • iou_threshold: The IOU threshold for continuing segmentation across 3d.
  • projection: The projection method to use. One of 'box', 'mask', 'points', 'points_and_mask' or 'single_point'. Pass a dictionary to choose the exact combination of projection modes.
  • update_progress: Callback to update an external progress bar.
  • box_extension: Extension factor for increasing the box size after projection.
  • verbose: Whether to print details about the segmentation steps.
Returns:

Array with the volumetric segmentation. Tuple with the first and last segmented slice.
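
For orientation, a minimal usage sketch (not part of the module): it assumes the 'vit_b' model and uses micro_sam.util.get_sam_model and micro_sam.util.precompute_image_embeddings to set up the predictor and embeddings; the volume and the annotated mask are toy placeholders.

import numpy as np
from micro_sam import util
from micro_sam.multi_dimensional_segmentation import segment_mask_in_volume

# Toy placeholders: a (z, y, x) volume and a binary object mask drawn in slice 16.
volume = np.random.rand(32, 256, 256).astype("float32")
annotated_slice = 16
annotated_mask = np.zeros((256, 256), dtype="uint8")
annotated_mask[100:140, 120:180] = 1

predictor = util.get_sam_model(model_type="vit_b")  # model choice is an assumption
image_embeddings = util.precompute_image_embeddings(predictor, volume, ndim=3)

# Initialize the volumetric segmentation with the single annotated slice.
segmentation = np.zeros(volume.shape, dtype="uint8")
segmentation[annotated_slice] = annotated_mask

segmentation, (z_min, z_max) = segment_mask_in_volume(
    segmentation, predictor, image_embeddings,
    segmented_slices=np.array([annotated_slice]),
    stop_lower=False, stop_upper=False,
    iou_threshold=0.8, projection="mask",
)
print("Object was projected from slice", z_min, "to slice", z_max)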

def merge_instance_segmentation_3d( slice_segmentation: numpy.ndarray, beta: float = 0.5, with_background: bool = True, gap_closing: Optional[int] = None, min_z_extent: Optional[int] = None, verbose: bool = True, pbar_init: Optional[callable] = None, pbar_update: Optional[callable] = None) -> numpy.ndarray:

Merge stacked 2d instance segmentations into a consistent 3d segmentation.

Solves a multicut problem based on the overlap of objects to merge across z.

Arguments:
  • slice_segmentation: The stacked segmentation across the slices. We assume that the segmentation is labeled consecutively across z.
  • beta: The bias term for the multicut. Higher values lead to a larger degree of over-segmentation and vice versa.
  • with_background: Whether this is a segmentation problem with background. In that case all edges connecting to the background are set to be repulsive.
  • gap_closing: If given, gaps in the segmentation are closed with a binary closing operation. The value is used to determine the number of iterations for the closing.
  • min_z_extent: Require a minimal extent in z for the segmented objects. This can help to prevent segmentation artifacts.
  • verbose: Verbosity flag.
  • pbar_init: Callback to initialize an external progress bar. Must accept number of steps and description. Can be used together with pbar_update to handle the napari progress bar in another thread. This enables using the function within a thread worker.
  • pbar_update: Callback to update an external progress bar.
Returns:

The merged segmentation.
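
A small sketch of preparing the expected input and merging it: per_slice_segmentations is a placeholder for 2d label images produced by any per-slice instance segmentation, and the parameter values are illustrative.

import numpy as np
from micro_sam.multi_dimensional_segmentation import merge_instance_segmentation_3d

per_slice_segmentations = [...]  # placeholder: one 2d label image per z-slice

# Stack the slices and shift the labels so that ids do not repeat across z,
# which is the labeling the merge step expects.
shape = (len(per_slice_segmentations),) + per_slice_segmentations[0].shape
stacked = np.zeros(shape, dtype="uint32")
offset = 0
for z, seg2d in enumerate(per_slice_segmentations):
    seg2d = seg2d.astype("uint32").copy()
    seg2d[seg2d != 0] += offset
    stacked[z] = seg2d
    offset = max(offset, int(stacked[z].max()))

merged = merge_instance_segmentation_3d(stacked, beta=0.5, gap_closing=2, min_z_extent=3)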

def automatic_3d_segmentation( volume: numpy.ndarray, predictor: segment_anything.predictor.SamPredictor, segmentor: micro_sam.instance_segmentation.AMGBase, embedding_path: Union[str, os.PathLike, NoneType] = None, with_background: bool = True, gap_closing: Optional[int] = None, min_z_extent: Optional[int] = None, tile_shape: Optional[Tuple[int, int]] = None, halo: Optional[Tuple[int, int]] = None, verbose: bool = True, return_embeddings: bool = False, batch_size: int = 1, **kwargs) -> numpy.ndarray:

Automatically segment objects in a volume.

First segments slices individually in 2d and then merges them across 3d based on overlap of objects between slices.

Arguments:
  • volume: The input volume.
  • predictor: The SAM model.
  • segmentor: The instance segmentation class.
  • embedding_path: The path to save pre-computed embeddings.
  • with_background: Whether the segmentation has background.
  • gap_closing: If given, gaps in the segmentation are closed with a binary closing operation. The value is used to determine the number of iterations for the closing.
  • min_z_extent: Require a minimal extent in z for the segmented objects. This can help to prevent segmentation artifacts.
  • tile_shape: Shape of the tiles for tiled prediction. By default prediction is run without tiling.
  • halo: Overlap of the tiles for tiled prediction.
  • verbose: Verbosity flag.
  • return_embeddings: Whether to return the precomputed image embeddings.
  • batch_size: The batch size to compute image embeddings over planes.
  • kwargs: Keyword arguments for the 'generate' method of the 'segmentor'.
Returns:

The segmentation.
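
A usage sketch, assuming micro_sam.util.get_sam_model and micro_sam.instance_segmentation.get_amg for building the predictor and the automatic mask generator; the model name, embedding path and post-processing values are illustrative.

from micro_sam import util, instance_segmentation
from micro_sam.multi_dimensional_segmentation import automatic_3d_segmentation

volume = ...  # placeholder: a (z, y, x) numpy array

predictor = util.get_sam_model(model_type="vit_b_lm")  # assumed model choice
segmentor = instance_segmentation.get_amg(predictor, is_tiled=False)

segmentation = automatic_3d_segmentation(
    volume, predictor, segmentor,
    embedding_path="./embeddings.zarr",  # optional on-disk cache for the embeddings
    gap_closing=2, min_z_extent=2,       # illustrative post-processing settings
)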

def track_across_frames( timeseries: numpy.ndarray, segmentation: numpy.ndarray, gap_closing: Optional[int] = None, min_time_extent: Optional[int] = None, verbose: bool = True, pbar_init: Optional[callable] = None, pbar_update: Optional[callable] = None) -> Tuple[numpy.ndarray, List[Dict]]:

Track segmented objects over time.

This function uses Trackastra: https://www.ecva.net/papers/eccv_2024/papers_ECCV/papers/09819.pdf for tracking. Please cite it if you use the automated tracking functionality.

Arguments:
  • timeseries: The input timeseries of images.
  • segmentation: The segmentation. Expects segmentation results per frame that are relabeled so that segmentation ids don't overlap.
  • gap_closing: If given, gaps in the segmentation are closed with a binary closing operation. The value is used to determine the number of iterations for the closing.
  • min_time_extent: Require a minimal extent in time for the tracked objects.
  • verbose: Verbosity flag.
  • pbar_init: Function to initialize the progress bar.
  • pbar_update: Function to update the progress bar.
Returns:

The tracking result. Each object is colored by its track id. The lineages, which correspond to the cell divisions. Lineages are represented by a list of dicts, with each dict encoding a lineage, where keys correspond to parent track ids. Each key either maps to a list with two child track ids (cell division) or to an empty list (no division).
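
A sketch of calling the function directly on an existing per-frame segmentation (requires trackastra to be installed); the timeseries and the segmentation are placeholders.

import numpy as np
from micro_sam.multi_dimensional_segmentation import track_across_frames

timeseries = ...    # placeholder: a (t, y, x) numpy array
segmentation = ...  # placeholder: per-frame instance labels, ids unique across frames

tracks, lineages = track_across_frames(
    np.asarray(timeseries), segmentation, gap_closing=1,  # gap_closing is illustrative
)
# 'tracks' is recolored by track id; 'lineages' is a list of {parent: [children]} dicts.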

def automatic_tracking( timeseries: numpy.ndarray, predictor: segment_anything.predictor.SamPredictor, segmentor: micro_sam.instance_segmentation.AMGBase, embedding_path: Union[str, os.PathLike, NoneType] = None, gap_closing: Optional[int] = None, min_time_extent: Optional[int] = None, tile_shape: Optional[Tuple[int, int]] = None, halo: Optional[Tuple[int, int]] = None, verbose: bool = True, **kwargs) -> Tuple[numpy.ndarray, List[Dict]]:

Automatically track objects in a timeseries based on per-frame automatic segmentation.

This function uses Trackastra: https://www.ecva.net/papers/eccv_2024/papers_ECCV/papers/09819.pdf for tracking. Please cite it if you use the automated tracking functionality.

Arguments:
  • timeseries: The input timeseries of images.
  • predictor: The SAM model.
  • segmentor: The instance segmentation class.
  • embedding_path: The path to save pre-computed embeddings.
  • gap_closing: If given, gaps in the segmentation are closed with a binary closing operation. The value is used to determine the number of iterations for the closing.
  • min_time_extent: Require a minimal extent in time for the tracked objects.
  • tile_shape: Shape of the tiles for tiled prediction. By default prediction is run without tiling.
  • halo: Overlap of the tiles for tiled prediction.
  • verbose: Verbosity flag.
  • kwargs: Keyword arguments for the 'generate' method of the 'segmentor'.
Returns:

The tracking result. Each object is colored by its track id. The lineages, which correspond to the cell divisions. Lineages are represented by a list of dicts, with each dict encoding a lineage, where keys correspond to parent track ids. Each key either maps to a list with two child track ids (cell division) or to an empty list (no division).
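
A usage sketch analogous to automatic_3d_segmentation, with the same hedges: the model name, embedding path and gap_closing value are assumptions.

from micro_sam import util, instance_segmentation
from micro_sam.multi_dimensional_segmentation import automatic_tracking

timeseries = ...  # placeholder: a (t, y, x) numpy array

predictor = util.get_sam_model(model_type="vit_b_lm")  # assumed model choice
segmentor = instance_segmentation.get_amg(predictor, is_tiled=False)

tracks, lineages = automatic_tracking(
    timeseries, predictor, segmentor,
    embedding_path="./embeddings.zarr",  # optional on-disk cache for the embeddings
    gap_closing=1,
)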

def get_napari_track_data( segmentation: numpy.ndarray, lineages: List[Dict], n_threads: Optional[int] = None) -> Tuple[numpy.ndarray, Dict[int, List]]:

Derive the inputs for the napari tracking layer from a tracking result.

Arguments:
  • segmentation: The segmentation, after relabeling with track ids.
  • lineages: The lineage information.
  • n_threads: Number of threads for extracting the track data from the segmentation.
Returns:

The array with the track data expected by napari. The parent dictionary for napari.
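
A sketch of passing the result to napari, continuing from the tracking sketch above; the layer names are arbitrary.

import napari
from micro_sam.multi_dimensional_segmentation import get_napari_track_data

# 'tracks' and 'lineages' as returned by automatic_tracking or track_across_frames.
track_data, parent_graph = get_napari_track_data(tracks, lineages)

viewer = napari.Viewer()
viewer.add_image(timeseries, name="raw")
viewer.add_labels(tracks, name="tracking")
viewer.add_tracks(track_data, graph=parent_graph, name="tracks")
napari.run()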