Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions ci/unit_tests/test_spleen_deepedit_annotation.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@

import os
import shutil
import sys
import tempfile
import unittest

Expand Down Expand Up @@ -123,12 +124,11 @@ def test_infer_config(self, override):
@parameterized.expand([TEST_CASE_2])
def test_infer_click_config(self, override):
override["dataset_dir"] = self.dataset_dir
override["use_click"] = True
override[
"dataset#data"
] = "$[{'image': i, 'background': [], 'spleen': [[6, 6, 6], [8, 8, 8]]} for i in @datalist]"
bundle_root = override["bundle_root"]
print(override)
sys.path = [bundle_root] + sys.path

inferrer = ConfigWorkflow(
workflow="infer",
Expand Down
20 changes: 5 additions & 15 deletions models/spleen_deepedit_annotation/configs/inference.json
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@
],
"number_intensity_ch": 1,
"device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
"use_click": false,
"network_def": {
"_target_": "DynUNet",
"spatial_dims": 3,
Expand Down Expand Up @@ -86,21 +85,12 @@
"clip": true
}
],
"auto_seg_transforms": [
"deepedit_transforms": [
{
"_target_": "Resized",
"keys": "image",
"spatial_size": "@spatial_size",
"mode": "area"
"_target_": "scripts.transforms.OrientationGuidanceMultipleLabelDeepEditd",
"ref_image": "image",
"label_names": "@label_names"
},
{
"_target_": "DiscardAddGuidanced",
"keys": "image",
"label_names": "@label_names",
"number_intensity_ch": "@number_intensity_ch"
}
],
"deepedit_transforms": [
{
"_target_": "AddGuidanceFromPointsDeepEditd",
"ref_image": "image",
Expand Down Expand Up @@ -133,7 +123,7 @@
],
"preprocessing": {
"_target_": "Compose",
"transforms": "$@preprocessing_transforms + (@deepedit_transforms if @use_click else @auto_seg_transforms) + @extra_transforms"
"transforms": "$@preprocessing_transforms + @deepedit_transforms + @extra_transforms"
},
"dataset": {
"_target_": "Dataset",
Expand Down
6 changes: 4 additions & 2 deletions models/spleen_deepedit_annotation/configs/metadata.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
{
"schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json",
"version": "0.4.8",
"version": "0.4.9",
"changelog": {
"0.4.9": "fix orientation issue on clicks",
"0.4.8": "Add infer transforms to manage clicks from viewer",
"0.4.7": "fix the wrong GPU index issue of multi-node",
"0.4.6": "update to use rc7 which solves dynunet issue",
Expand Down Expand Up @@ -31,7 +32,8 @@
"optional_packages_version": {
"itk": "5.3.0",
"pytorch-ignite": "0.4.9",
"scikit-image": "0.19.3"
"scikit-image": "0.19.3",
"einops": "0.6.1"
},
"name": "Spleen DeepEdit annotation",
"task": "Decathlon spleen segmentation",
Expand Down
13 changes: 1 addition & 12 deletions models/spleen_deepedit_annotation/docs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -118,22 +118,11 @@ python -m monai.bundle run --config_file "['configs/train.json','configs/evaluat

#### Execute inference:


For automatic inference mode:


```
python -m monai.bundle run --config_file configs/inference.json
```

For interactive segmentation mode, in which the user provides clicks, set the **use_click** flag to true:


```
python -m monai.bundle run --config_file configs/inference.json --use_click true
```

Clicks should be added to the data dictionary that is passed to the preprocessing transforms. The add keys are defined in `label_names` in `configs/inference.json`, and the corresponding values are the point coordinates. The following is an example of a data dictionary:
Optionally, clicks can be added to the data dictionary that is passed to the preprocessing transforms. The added keys are defined in `label_names` in `configs/inference.json`, and the corresponding values are the point coordinates. The following is an example of a data dictionary:

```
{"image": "example.nii.gz", "background": [], "spleen": [[I1, J1, K1], [I2, J2, K2]]}
Expand Down
38 changes: 38 additions & 0 deletions models/spleen_deepedit_annotation/scripts/transforms.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
from typing import Dict

import numpy as np
from einops import rearrange
from monai.transforms.transform import Transform


class OrientationGuidanceMultipleLabelDeepEditd(Transform):
    """Re-orient interactive click coordinates into the space of the transformed image.

    For every label key listed in ``label_names``, the corresponding entry in the
    data dictionary (a list/array of ``[i, j, k]`` points) is mapped through
    ``inv(current_affine) @ original_affine`` so that clicks provided in the
    original image space line up with the re-oriented/re-sampled image tensor.
    """

    def __init__(self, ref_image="image", label_names=None):
        """
        Args:
            ref_image: key of the reference image whose ``meta["affine"]`` and
                ``meta["original_affine"]`` define the coordinate change.
            label_names: mapping whose keys are the per-label click keys in the
                data dictionary (values are not used here). ``None`` is treated
                as "no labels".
        """
        self.ref_image = ref_image
        # Normalize None -> {} so __call__ never crashes with AttributeError
        # when the transform is constructed with the default arguments.
        self.label_names = label_names if label_names is not None else {}

    def transform_points(self, point, affine):
        """Apply a 4x4 homogeneous ``affine`` to a batch of 3-D points.

        Args:
            point: numpy array of shape ``[bs, N, 3]``.
            affine: 4x4 affine matrix.

        Returns:
            numpy array of shape ``[bs, N, 3]`` holding the transformed points.
        """
        bs, n = point.shape[:2]
        # Promote to homogeneous coordinates: [bs, N, 3] -> [bs, N, 4].
        homogeneous = np.concatenate((point, np.ones((bs, n, 1))), axis=-1)
        # Flatten to a [4, bs*N] matrix of column points (same memory layout as
        # einops.rearrange("b n d -> d (b n)")), apply the affine, restore shape.
        flat = homogeneous.reshape(bs * n, 4).T
        mapped = (affine @ flat).T.reshape(bs, n, 4)
        # Drop the homogeneous coordinate.
        return mapped[:, :, :3]

    def __call__(self, data):
        d: Dict = dict(data)
        for key_label in self.label_names.keys():
            points = d.get(key_label, [])
            if len(points) < 1:
                # No clicks supplied for this label -- nothing to re-orient.
                continue
            # NOTE(review): assumes the reference image is a MONAI MetaTensor
            # whose meta carries a torch "affine" (hence .numpy()) and a numpy
            # "original_affine" -- confirm against the inference pipeline.
            reoriented_points = self.transform_points(
                np.array(points)[None],
                np.linalg.inv(d[self.ref_image].meta["affine"].numpy()) @ d[self.ref_image].meta["original_affine"],
            )
            d[key_label] = reoriented_points[0]
        return d