From b3f14574bdce9d56f98826e52f711c7e23cf5214 Mon Sep 17 00:00:00 2001
From: M Q
Date: Fri, 29 Aug 2025 22:07:01 -0700
Subject: [PATCH 01/21] Initial code for making a RESTful microservice with
 MONAI App SDK

Signed-off-by: M Q
---
 platforms/aidoc/README.md                     | 149 ++++
 .../restful_app/ai_spleen_seg_app/__init__.py |  18 +
 .../restful_app/ai_spleen_seg_app/__main__.py |  19 +
 .../restful_app/ai_spleen_seg_app/app.py      | 186 +++++++
 .../restful_app/ai_spleen_seg_app/app.yaml    |  27 ++
 .../ai_spleen_seg_app/reporter_operator.py    |  64 +++
 .../ai_spleen_seg_app/requirements.txt        |  11 +
 platforms/aidoc/restful_app/app.py            | 139 ++++++
 .../dicom_series_to_volume_operator_local.py  | 454 ++++++++++++++++++
 .../aidoc/restful_app/env_settings_repo.sh    |   5 +
 platforms/aidoc/restful_app/requirements.txt  |  14 +
 platforms/aidoc/test_endpoints.sh             |  78 +++
 12 files changed, 1164 insertions(+)
 create mode 100644 platforms/aidoc/README.md
 create mode 100644 platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py
 create mode 100644 platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py
 create mode 100644 platforms/aidoc/restful_app/ai_spleen_seg_app/app.py
 create mode 100644 platforms/aidoc/restful_app/ai_spleen_seg_app/app.yaml
 create mode 100644 platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py
 create mode 100644 platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt
 create mode 100644 platforms/aidoc/restful_app/app.py
 create mode 100644 platforms/aidoc/restful_app/dicom_series_to_volume_operator_local.py
 create mode 100755 platforms/aidoc/restful_app/env_settings_repo.sh
 create mode 100644 platforms/aidoc/restful_app/requirements.txt
 create mode 100644 platforms/aidoc/test_endpoints.sh

diff --git a/platforms/aidoc/README.md b/platforms/aidoc/README.md
new file mode 100644
index 00000000..8c1d0632
--- /dev/null
+++ b/platforms/aidoc/README.md
@@ -0,0 +1,149 @@
+# RESTful Wrapper Application for MONAI Deploy
+
+This application provides a RESTful web interface to run MONAI Deploy applications.
+
+It allows you to start a processing job, check the status, and receive a callback when the job is complete.
+
+As it stands now, the callback message content is stubbed/generated in the wrapper app. This will change to a design
+where the wrapper app passes a static callback function to the MONAI Deploy app, which will have a reporter operator
+that gathers the operational and domain-specific info in the app's pipeline and then reports the content back via
+this callback. The wrapper app will then have a mapping function to transform the reported data into the format
+expected by the external callback endpoint.
+
+Also, the whole RESTful application can be packaged into a container image using the MONAI Deploy app packager, though that is not done here.
+
+## How to Run
+
+Change the working directory to the same level as this README.
+
+1. **Install Dependencies**
+
+   Create and activate a Python virtual environment, then:
+
+   ```bash
+   pip install -r restful_app/requirements.txt
+   ```
+2. **Download Test Data and Set Env Vars**
+   The model and test DICOM series are shared on Google Drive and require access permission;
+   the zip file is [here](https://drive.google.com/uc?id=1IwWMpbo2fd38fKIqeIdL8SKTGvkn31tK).
+
+   Please make a request so that it can be shared with a specific Gmail account.
+
+   `gdown` may also work:
+   ```
+   pip install gdown
+   gdown https://drive.google.com/uc?id=1IwWMpbo2fd38fKIqeIdL8SKTGvkn31tK
+   ```
+
+   Unzip the file to local folders. If deviating from the paths noted below, please adjust the env var values accordingly.
+
+   ```
+   unzip -o "ai_spleen_seg_bundle_data.zip"
+   rm -rf models && mkdir -p models/model && mv model.ts models/model && ls models/model
+   ```
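+
+   After these steps, the working directory is expected to look roughly like the following
+   (illustrative only; the `dcm` folder holds the test DICOM series extracted from the zip):
+
+   ```
+   models
+   └── model
+       └── model.ts
+   dcm
+   └── ... (DICOM instance files of the CT series)
+   ```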
+
+   Set the environment vars so that the model can be found by the Spleen Seg app. These
+   settings are also consolidated in a script such as `restful_app/env_settings_repo.sh`
+   (adjust its paths to your layout).
+
+   ```
+   export HOLOSCAN_MODEL_PATH=models
+   ```
+
+3. **Run the Web Application**
+
+   ```bash
+   python restful_app/app.py
+   ```
+
+   The application will start on `http://127.0.0.1:5000`.
+
+## Test API Endpoints
+
+A simple test client is provided; it calls the service endpoints and also provides
+a callback endpoint to receive the message content on the specified port.
+
+Open another console window and change to the same directory as this file.
+
+Set the environment vars so that the test script can find the input DICOM files and write the callback contents.
+Also, once the RESTful app completes each processing run, the Spleen Seg app's output will be saved in
+the output folder specified below (the script passes the output folder via the REST API).
+
+```
+export HOLOSCAN_INPUT_PATH=dcm
+export HOLOSCAN_OUTPUT_PATH=output
+```
+
+Run the test script and examine its console output.
+
+```
+source test_endpoints.sh
+```
+
+Once the script completes, examine the `output` folder, which should contain the following (the dcm file
+name will be different):
+
+```
+output
+├── 1.2.826.0.1.3680043.10.511.3.22611096892439837402906545708809852.dcm
+└── stl
+    └── spleen.stl
+```
+
+The script can be run multiple times, or modified to loop with different output folder settings.
+
+### Check Status
+
+- **URL**: `/status`
+- **Method**: `GET`
+- **Description**: Checks the current status of the processor.
+- **Success Response**:
+  - **Code**: 200 OK
+  - **Content**: `{ "status": "IDLE" }` or `{ "status": "BUSY" }`
+
+### Process Data
+
+- **URL**: `/process`
+- **Method**: `POST`
+- **Description**: Starts a new processing job.
+- **Body**:
+
+  ```json
+  {
+    "input_folder": "/path/to/your/input/data",
+    "output_folder": "/path/to/your/output/folder",
+    "callback_url": "http://your-service.com/callback"
+  }
+  ```
+
+- **Success Response**:
+  - **Code**: 202 ACCEPTED
+  - **Content**: `{ "message": "Processing started." }`
+- **Error Response**:
+  - **Code**: 409 CONFLICT
+  - **Content**: `{ "error": "Processor is busy." }`
+  - **Code**: 400 BAD REQUEST
+  - **Content**: `{ "error": "Missing required fields." }`
+
+### Callback
+
+When processing is complete, the application will send a `POST` request to the `callback_url` provided in the process request. The body of the callback will be:
+
+```json
+{
+  "run_success": true,
+  "result": "Processing completed successfully.",
+  "output_files": ["test.json", "seg.dcm"],
+  "error_message": null,
+  "error_code": null
+}
+```
+
+Or, in case of an error:
+
+```json
+{
+  "run_success": false,
+  "error_message": "E.g., model network is not loaded and model file not found.",
+  "error_code": 500
+}
+```
diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py
new file mode 100644
index 00000000..526cee59
--- /dev/null
+++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2021-2023 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +_current_dir = os.path.abspath(os.path.dirname(__file__)) +if sys.path and os.path.abspath(sys.path[0]) != _current_dir: + sys.path.insert(0, _current_dir) +del _current_dir diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py new file mode 100644 index 00000000..2d67f364 --- /dev/null +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py @@ -0,0 +1,19 @@ +# Copyright 2021-2023 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from app import AISpleenSegApp + +if __name__ == "__main__": + logging.info(f"Begin {__name__}") + AISpleenSegApp().run() + logging.info(f"End {__name__}") diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py new file mode 100644 index 00000000..488d00cc --- /dev/null +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py @@ -0,0 +1,186 @@ +# Copyright 2021-2023 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path + +from dicom_series_to_volume_operator_local import DICOMSeriesToVolumeOperator + +# Required for setting SegmentDescription attributes. Direct import as this is not part of App SDK package. +from pydicom.sr.codedict import codes + +from monai.deploy.conditions import CountCondition +from monai.deploy.core import AppContext, Application +from monai.deploy.core.domain import Image +from monai.deploy.core.io_type import IOType +from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator +from monai.deploy.operators.dicom_seg_writer_operator import DICOMSegmentationWriterOperator, SegmentDescription +from monai.deploy.operators.dicom_series_selector_operator import DICOMSeriesSelectorOperator + +# Use a local fixed version. 
from monai.deploy.operators.dicom_series_to_volume_operator import DICOMSeriesToVolumeOperator
+from monai.deploy.operators.monai_bundle_inference_operator import (
+    BundleConfigNames,
+    IOMapping,
+    MonaiBundleInferenceOperator,
+)
+from monai.deploy.operators.stl_conversion_operator import STLConversionOperator
+from reporter_operator import ExecutionStatusReporterOperator
+
+
+# @resource(cpu=1, gpu=1, memory="7Gi")
+# pip_packages can be a string that is a path(str) to requirements.txt file or a list of packages.
+# The monai pkg is not required by this class, but by the included operators.
+class AISpleenSegApp(Application):
+    """Demonstrates inference with the built-in MONAI Bundle inference operator, with DICOM files as input/output.
+
+    This application loads a set of DICOM instances, selects the appropriate series, converts the series to
+    a 3D volume image, performs inference with the built-in MONAI Bundle inference operator, including
+    pre-processing and post-processing, saves the segmentation image as a DICOM Seg IOD instance file,
+    and optionally saves the surface mesh in STL format.
+
+    Pertinent MONAI Bundle:
+      https://github.com/Project-MONAI/model-zoo/tree/dev/models/spleen_ct_segmentation
+
+    Execution Time Estimate:
+      With an NVIDIA GV100 32GB GPU, for an input DICOM Series of 515 instances, the execution time is around
+      25 seconds when saving both the DICOM Seg and the surface mesh STL file, and 15 seconds with DICOM Seg only.
+    """
+
+    def __init__(self, *args, status_callback=None, **kwargs):
+        """Creates an application instance."""
+        self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__))
+        self._status_callback = status_callback
+        super().__init__(*args, **kwargs)
+
+    def run(self, *args, **kwargs):
+        # This method calls the base class to run. Can be omitted if simply calling through.
+        self._logger.info(f"Begin {self.run.__name__}")
+        # The try...except block is removed as the reporter operator will handle status reporting.
+        super().run(*args, **kwargs)
+        self._logger.info(f"End {self.run.__name__}")
+
+    def compose(self):
+        """Creates the app-specific operators and chains them up in the processing DAG."""
+
+        logging.info(f"Begin {self.compose.__name__}")
+
+        # Use command-line options over environment variables to init context.
+        app_context: AppContext = Application.init_app_context(self.argv)
+        app_input_path = Path(app_context.input_path)
+        app_output_path = Path(app_context.output_path)
+
+        # Create the custom operator(s) as well as SDK built-in operator(s).
+        study_loader_op = DICOMDataLoaderOperator(
+            self, CountCondition(self, 1), input_folder=app_input_path, name="study_loader_op"
+        )
+        series_selector_op = DICOMSeriesSelectorOperator(self, rules=Sample_Rules_Text, name="series_selector_op")
+        series_to_vol_op = DICOMSeriesToVolumeOperator(self, name="series_to_vol_op")
+
+        # Create the inference operator that supports MONAI Bundle and automates the inference.
+        # The IOMapping labels match the input and prediction keys in the pre- and post-processing.
+        # The model_name is optional when the app has only one model.
+        # The bundle_path argument can optionally be set to an accessible bundle file path in the dev
+        # environment, so when the app is packaged into a MAP, the operator can complete the bundle parsing
+        # during init.
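+        # For example (hypothetical path), passing bundle_path=Path("models/model.ts") to the
+        # operator below would enable that early parsing; here the bundle is instead resolved
+        # at run time from the app context (e.g., via HOLOSCAN_MODEL_PATH).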
+
+        config_names = BundleConfigNames(config_names=["inference"])  # Same as the default
+
+        bundle_spleen_seg_op = MonaiBundleInferenceOperator(
+            self,
+            input_mapping=[IOMapping("image", Image, IOType.IN_MEMORY)],
+            output_mapping=[IOMapping("pred", Image, IOType.IN_MEMORY)],
+            app_context=app_context,
+            bundle_config_names=config_names,
+            name="bundle_spleen_seg_op",
+        )
+
+        # Create DICOM Seg writer providing the required segment description for each segment with
+        # the actual algorithm and the pertinent organ/tissue. The segment_label, algorithm_name,
+        # and algorithm_version are of DICOM VR LO type, limited to 64 chars.
+        # https://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
+        segment_descriptions = [
+            SegmentDescription(
+                segment_label="Spleen",
+                segmented_property_category=codes.SCT.Organ,
+                segmented_property_type=codes.SCT.Spleen,
+                algorithm_name="volumetric (3D) segmentation of the spleen from CT image",
+                algorithm_family=codes.DCM.ArtificialIntelligence,
+                algorithm_version="0.3.2",
+            )
+        ]
+
+        custom_tags = {"SeriesDescription": "AI generated Seg, not for clinical use."}
+
+        dicom_seg_writer = DICOMSegmentationWriterOperator(
+            self,
+            segment_descriptions=segment_descriptions,
+            custom_tags=custom_tags,
+            output_folder=app_output_path,
+            name="dicom_seg_writer",
+        )
+
+        reporter_op = ExecutionStatusReporterOperator(self, status_callback=self._status_callback)
+
+        # Create the processing pipeline by specifying the source and destination operators, and
+        # ensuring the output from the former matches the input of the latter, in both name and type.
+        self.add_flow(study_loader_op, series_selector_op, {("dicom_study_list", "dicom_study_list")})
+        self.add_flow(
+            series_selector_op, series_to_vol_op, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(series_to_vol_op, bundle_spleen_seg_op, {("image", "image")})
+        # Note below the dicom_seg_writer requires two inputs, each coming from a source operator.
+        self.add_flow(
+            series_selector_op, dicom_seg_writer, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(bundle_spleen_seg_op, dicom_seg_writer, {("pred", "seg_image")})
+        # Create the surface mesh STL conversion operator and add it to the app execution flow;
+        # remove the following couple of lines if the STL output is not needed.
+        stl_conversion_op = STLConversionOperator(
+            self, output_file=app_output_path.joinpath("stl/spleen.stl"), name="stl_conversion_op"
+        )
+        self.add_flow(bundle_spleen_seg_op, stl_conversion_op, {("pred", "image")})
+
+        # Connect the reporter operator to the end of the pipeline.
+        # It will be triggered after the STL conversion completes.
+        self.add_flow(stl_conversion_op, reporter_op, {("stl_bytes", "data")})
+
+        logging.info(f"End {self.compose.__name__}")
+
+
+# This is a sample series selection rule in JSON, simply selecting CT series.
+# If the study has more than 1 CT series, then all of them will be selected.
+# Please see more details in DICOMSeriesSelectorOperator.
+Sample_Rules_Text = """
+{
+    "selections": [
+        {
+            "name": "CT Series",
+            "conditions": {
+                "StudyDescription": "(.*?)",
+                "Modality": "(?i)CT",
+                "SeriesDescription": "(.*?)"
+            }
+        }
+    ]
+}
+"""
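+
+# As an illustration only, a stricter rule could further constrain the selection; the values
+# below are hypothetical but follow the same regex-per-attribute condition syntax:
+#
+# Strict_Rules_Text = """
+# {
+#     "selections": [
+#         {
+#             "name": "Abdominal CT Series",
+#             "conditions": {
+#                 "Modality": "(?i)CT",
+#                 "SeriesDescription": "(?i)abd"
+#             }
+#         }
+#     ]
+# }
+# """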
+
+if __name__ == "__main__":
+    # Creates the app and tests it standalone. When running in this mode, please note the following:
+    #     -m <model file path>, for the model file path
+    #     -i <input folder>, for the input DICOM CT series folder
+    #     -o <output folder>, for the output folder, default $PWD/output
+    # e.g.
+    #     monai-deploy exec app.py -i input -m model/model.ts
+    #
+    logging.info(f"Begin {__name__}")
+    AISpleenSegApp().run()
+    logging.info(f"End {__name__}")
diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.yaml b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.yaml
new file mode 100644
index 00000000..390ce7ee
--- /dev/null
+++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.yaml
@@ -0,0 +1,27 @@
+%YAML 1.2
+# SPDX-FileCopyrightText: Copyright (c) 2022-2023 MONAI. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+application:
+  title: MONAI Deploy App Package - Spleen Seg Inference
+  version: 1.0
+  inputFormats: ["file"]
+  outputFormats: ["file"]
+
+resources:
+  cpu: 1
+  gpu: 1
+  memory: 1Gi
+  gpuMemory: 7Gi
diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py
new file mode 100644
index 00000000..365ade75
--- /dev/null
+++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py
@@ -0,0 +1,64 @@
+# Copyright 2025 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+# Import only the names used in this module.
+from monai.deploy.core import ConditionType, Fragment, Operator, OperatorSpec
+
+
+class ExecutionStatusReporterOperator(Operator):
+    """
+    This operator reports the execution status of the application via a callback.
+    It is intended to be the last operator in the application's workflow.
+    """
+
+    def __init__(self, fragment: Fragment, *args, status_callback, **kwargs):
+        """
+        Args:
+            fragment (Fragment): An instance of the Application class.
+            status_callback (callable): The callback function to invoke with the status.
+        """
+        self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__))
+        self._status_callback = status_callback
+        super().__init__(fragment, *args, **kwargs)
+
+    def setup(self, spec: OperatorSpec):
+        spec.input("data")
+        spec.output("data").condition(ConditionType.NONE)
+
+    def compute(self, op_input, op_output, context):
+        """
+        Receives data from the upstream operator and invokes the status callback.
+        """
+        # For now, we are not doing anything with the input data or collecting logs.
+        # We will just report success.
+        # In the future, this is where log collection and summary generation would happen.
+        try:
+            # In a real implementation, you would gather data here.
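+            # A sketch (names hypothetical) of what a fuller summary could carry once data
+            # gathering is implemented, e.g., the size of the received payload:
+            #     data = op_input.receive("data")
+            #     summary = {"status": "Success", "output_size_bytes": len(data or b"")}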
+            op_input.receive("data")
+            summary = {"status": "Success", "message": "Application completed successfully."}
+            if self._status_callback:
+                self._status_callback(summary)
+            op_output.emit(summary, "data")
+        except Exception as e:
+            self._logger.error(f"Error in status reporter: {e}")
+            if self._status_callback:
+                error_summary = {"status": "Failure", "message": f"Application failed with error: {e}"}
+                self._status_callback(error_summary)
diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt b/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt
new file mode 100644
index 00000000..d60890dd
--- /dev/null
+++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt
@@ -0,0 +1,11 @@
+monai-deploy-app-sdk>=3.0.0
+scikit-image>=0.17.2
+pydicom>=2.3.0
+highdicom>=0.18.2
+SimpleITK>=2.0.0
+Pillow>=8.0.0
+numpy-stl>=2.12.0
+trimesh>=3.8.11
+nibabel>=3.2.1
+torch>=1.12.0
+monai>=1.0.0
diff --git a/platforms/aidoc/restful_app/app.py b/platforms/aidoc/restful_app/app.py
new file mode 100644
index 00000000..4361a5e6
--- /dev/null
+++ b/platforms/aidoc/restful_app/app.py
@@ -0,0 +1,139 @@
+import argparse
+import importlib
+import logging
+import os
+import sys
+import threading
+from http import HTTPStatus
+
+import requests
+from flask import Flask, jsonify, request
+
+# The MONAI Deploy application to be wrapped.
+# This can be changed to any other application in the repository.
+# Provide the module path and the class name of the application.
+APP_MODULE_NAME = "ai_spleen_seg_app.app"
+APP_CLASS_NAME = "AISpleenSegApp"
+
+# Flask application setup
+app = Flask(__name__)
+logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+
+# Global state to track processing status. A lock is used for thread safety.
+PROCESSING_STATUS = "IDLE"
+PROCESSING_LOCK = threading.Lock()
+
+
+def set_processing_status(status):
+    """Sets the processing status in a thread-safe manner."""
+    global PROCESSING_STATUS
+    with PROCESSING_LOCK:
+        PROCESSING_STATUS = status
+
+
+def get_processing_status():
+    """Gets the processing status in a thread-safe manner."""
+    with PROCESSING_LOCK:
+        return PROCESSING_STATUS
+
+
+def run_processing(input_folder, output_folder, callback_url):
+    """
+    This function runs in a separate thread to execute the MONAI Deploy application.
+    """
+
+    # Define the callback function that the MONAI Deploy app will call.
+    def app_status_callback(summary):
+        """Callback function to handle the final status from the application."""
+        logging.info(f"Received status from application: {summary}")
+        if callback_url:
+            try:
+                logging.info(f"Sending final status callback to {callback_url}")
+                # Here you could map the summary to the expected format of the callback.
+                # For now, we'll just forward the summary.
+                requests.post(callback_url, json=summary, timeout=5)
+                logging.info("Sent final status callback.")
+            except Exception as e:
+                logging.error(f"Failed to send callback to {callback_url}: {e}")
+
+    try:
+        logging.info("Starting processing in a background thread.")
+        set_processing_status("BUSY")
+
+        # Set environment variables for the MONAI Deploy application.
+        # The application context will pick these up.
+        os.environ["MONAI_INPUTPATH"] = input_folder
+        os.environ["MONAI_OUTPUTPATH"] = output_folder
+        os.environ["HOLOSCAN_INPUT_PATH"] = input_folder  # For Holoscan-based apps
+        os.environ["HOLOSCAN_OUTPUT_PATH"] = output_folder  # For Holoscan-based apps
+
+        # Dynamically import the application class from the specified module.
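+        # importlib.import_module() resolves APP_MODULE_NAME to a module object, and getattr()
+        # then looks up APP_CLASS_NAME on it, so wrapping a different MONAI Deploy app only
+        # requires changing the two constants at the top of this file.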
+ logging.info(f"Loading application: {APP_MODULE_NAME}.{APP_CLASS_NAME}") + module = importlib.import_module(APP_MODULE_NAME) + app_class = getattr(module, APP_CLASS_NAME) + monai_app = app_class(status_callback=app_status_callback) + + # Run the MONAI Deploy application. + logging.info("Running the MONAI Deploy application.") + monai_app.run() + logging.info("Processing completed successfully.") + + except Exception as e: + logging.error(f"An error occurred during processing: {e}") + # If the app fails to even start, we need to report a failure. + app_status_callback({"status": "Failure", "message": f"Application failed to run: {e}"}) + + finally: + set_processing_status("IDLE") + logging.info("Processor is now IDLE.") + + +@app.route("/status", methods=["GET"]) +def status(): + """Endpoint to check the current processing status.""" + return jsonify({"status": get_processing_status()}) + + +@app.route("/process", methods=["POST"]) +def process(): + """Endpoint to start a new processing job.""" + if get_processing_status() == "BUSY": + return jsonify({"error": "Processor is busy."}), HTTPStatus.CONFLICT + + data = request.get_json() + if not data or "input_folder" not in data or "output_folder" not in data: + return jsonify({"error": "Missing required fields."}), HTTPStatus.BAD_REQUEST + + input_folder = data["input_folder"] + output_folder = data["output_folder"] + callback_url = data.get("callback_url") # Callback URL is optional + + # Start the processing in a background thread. + thread = threading.Thread( + target=run_processing, args=(input_folder, output_folder, callback_url) + ) + thread.start() + + return jsonify({"message": "Processing started."}), HTTPStatus.ACCEPTED + + +if __name__ == "__main__": + # Note: For production, use a proper WSGI server like Gunicorn or uWSGI. + parser = argparse.ArgumentParser(description="Run the MONAI Deploy RESTful wrapper application.") + parser.add_argument( + "--host", + type=str, + default=os.environ.get("FLASK_HOST", "0.0.0.0"), + help="Host address to bind the Flask server to. Defaults to env var FLASK_HOST or 0.0.0.0.", + ) + parser.add_argument( + "--port", + type=int, + default=int(os.environ.get("FLASK_PORT", 5000)), + help="Port to listen on. Defaults to env var FLASK_PORT or 5000.", + ) + args = parser.parse_args() + host = args.host or os.environ.get("FLASK_HOST", "0.0.0.0") + port = args.port or int(os.environ.get("FLASK_PORT", 5000)) + app.run(host=host, port=port) \ No newline at end of file diff --git a/platforms/aidoc/restful_app/dicom_series_to_volume_operator_local.py b/platforms/aidoc/restful_app/dicom_series_to_volume_operator_local.py new file mode 100644 index 00000000..30812768 --- /dev/null +++ b/platforms/aidoc/restful_app/dicom_series_to_volume_operator_local.py @@ -0,0 +1,454 @@ +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import logging +import math +from typing import Dict, List, Union + +import numpy as np + +from monai.deploy.utils.importutil import optional_import + +apply_rescale, _ = optional_import("pydicom.pixels", name="apply_rescale") + +from monai.deploy.core import ConditionType, Fragment, Operator, OperatorSpec +from monai.deploy.core.domain.dicom_series_selection import StudySelectedSeries +from monai.deploy.core.domain.image import Image + + +class DICOMSeriesToVolumeOperator(Operator): + """This operator converts an instance of DICOMSeries into an Image object. + + The loaded Image Object can be used for further processing via other operators. + The data array will be a 3D image NumPy array with index order of `DHW`. + Channel is limited to 1 as of now, and `C` is absent in the NumPy array. + + Named Input: + study_selected_series_list: List of StudySelectedSeries. + Named Output: + image: Image object. + """ + + def __init__(self, fragment: Fragment, *args, **kwargs): + """Create an instance for a containing application object. + + Args: + fragment (Fragment): An instance of the Application class which is derived from Fragment. + """ + + self.input_name_series = "study_selected_series_list" + self.output_name_image = "image" + # Need to call the base class constructor last + super().__init__(fragment, *args, **kwargs) + + def setup(self, spec: OperatorSpec): + spec.input(self.input_name_series) + spec.output(self.output_name_image).condition(ConditionType.NONE) + + def compute(self, op_input, op_output, context): + """Performs computation for this operator and handles I/O.""" + + study_selected_series_list = op_input.receive(self.input_name_series) + + # TODO: need to get a solution to correctly annotate and consume multiple image outputs. + # For now, only supports the one and only one selected series. + image = self.convert_to_image(study_selected_series_list) + op_output.emit(image, self.output_name_image) + + def convert_to_image(self, study_selected_series_list: List[StudySelectedSeries]) -> Union[Image, None]: + """Extracts the pixel data from a DICOM Series and other attributes to create an Image object""" + # For now, only supports the one and only one selected series. 
+ if not study_selected_series_list or len(study_selected_series_list) < 1: + raise ValueError("Missing expected input 'study_selected_series_list'") + + for study_selected_series in study_selected_series_list: + if not isinstance(study_selected_series, StudySelectedSeries): + raise ValueError("Element in input is not expected type, 'StudySelectedSeries'.") + selected_series = study_selected_series.selected_series[0] + dicom_series = selected_series.series + selection_name = selected_series.selection_name + self.prepare_series(dicom_series) + metadata = self.create_metadata(dicom_series) + + # Add to the metadata the DICOMStudy properties and selection metadata + metadata.update(self._get_instance_properties(study_selected_series.study)) + selection_metadata = {"selection_name": selection_name} + metadata.update(selection_metadata) + + voxel_data = self.generate_voxel_data(dicom_series) + image = self.create_volumetric_image(voxel_data, metadata) + + # Now it is time to assign the converted image to the SelectedSeries obj + selected_series.image = image + + # Break out since limited to one series/image for now + break + + # TODO: This needs to be updated once allowed to output multiple Image objects + return study_selected_series_list[0].selected_series[0].image + + def generate_voxel_data(self, series): + """Applies rescale slope and rescale intercept to the pixels. + + Supports monochrome image only for now. Photometric Interpretation attribute, + tag (0028,0004), is considered. Both MONOCHROME2 (IDENTITY) and MONOCHROME1 (INVERSE) + result in an output image where The minimum sample value is intended to be displayed as black. + + Args: + series: DICOM Series for which the pixel data needs to be extracted. + + Returns: + A 3D numpy tensor representing the volumetric data. + """ + slices = series.get_sop_instances() + # The sop_instance get_pixel_array() returns a 2D NumPy array with index order + # of `HW`. The pixel array of all instances will be stacked along the first axis, + # so the final 3D NumPy array will have index order of [DHW]. This is consistent + # with the NumPy array returned from the ITK GetArrayViewFromImage on the image + # loaded from the same DICOM series. + vol_data = np.stack([s.get_pixel_array() for s in slices], axis=0) + + # Use pydicom utility to apply a modality lookup table or rescale operator to the pixel array. + # The pydicom Dataset is required which can be obtained from the first slice's native SOP instance. + # If Modality LUT is present the return array is of np.uint8 or np.uint16, and if Rescale + # Intercept and Rescale Slope are present, np.float64. + # If the pixel array is already in the correct type, the return array is the same as the input array. + try: + native_sop = slices[0].get_native_sop_instance() + vol_data = apply_rescale(vol_data, native_sop) + except Exception as e: + logging.error(f"Failed to apply rescale to DICOM volume: {e}") + raise RuntimeError("Failed to apply rescale to DICOM volume.") from e + + # For now we support monochrome image only, for which DICOM Photometric Interpretation + # (0028,0004) has defined terms, MONOCHROME1 and MONOCHROME2, with the former being: + # Pixel data represent a single monochrome image plane. The minimum sample value is + # intended to be displayed as white after any VOI gray scale transformations have been + # performed. See PS3.4. This value may be used only when Samples per Pixel (0028,0002) + # has a value of 1. 
May be used for pixel data in a Native (uncompressed) or Encapsulated + # (compressed) format; see Section 8.2 in PS3.5. + # and for the latter "The minimum sample value is intended to be displayed as black" + # + # In this function, pixel data will be interpreted as if MONOCHROME2, hence inverting + # MONOCHROME1 for the final voxel data. + + photometric_interpretation = ( + slices[0].get_native_sop_instance().get("PhotometricInterpretation", "").strip().upper() + ) + presentation_lut_shape = slices[0].get_native_sop_instance().get("PresentationLUTShape", "").strip().upper() + + if not photometric_interpretation: + logging.warning("Cannot get value of attribute Photometric Interpretation.") + + if photometric_interpretation != "MONOCHROME2": + if photometric_interpretation == "MONOCHROME1" or presentation_lut_shape == "INVERSE": + logging.debug("Applying INVERSE transformation as required for MONOCHROME1 image.") + vol_data = np.amax(vol_data) - vol_data + else: + raise ValueError( + f"Cannot process pixel data with Photometric Interpretation of {photometric_interpretation}." + ) + + # NumPy's np.can_cast function, as of version 2.0, no longer supports Python scalars directly and + # does not apply value-based logic for 0-D arrays and NumPy scalars. + # The following can_cast calls are expecting the array is already of the correct type. + if vol_data.dtype == np.uint8: + logging.info("Rescaled pixel data is of type uint8.") + elif np.can_cast(vol_data, np.uint16, casting="safe"): + logging.info("Casting to uint16") + vol_data = vol_data.astype(dtype=np.uint16, casting="safe") + elif np.can_cast(vol_data, np.float32, casting="safe"): + logging.info("Casting to float32") + vol_data = vol_data.astype(dtype=np.float32, casting="safe") + else: + logging.info("Rescaled pixel data remains as of type float64.") + + return vol_data + + def create_volumetric_image(self, vox_data, metadata): + """Creates an instance of 3D image. + + Args: + vox_data: A numpy array representing the volumetric data. + metadata: DICOM attributes in a dictionary. + + Returns: + An instance of Image object. + """ + image = Image(vox_data, metadata) + return image + + def prepare_series(self, series): + """Computes the slice normal for each slice and then projects the first voxel of each + slice on that slice normal. + + It computes the distance of that point from the origin of the patient coordinate system along the slice normal. + It orders the slices in the series according to that distance. + + Args: + series: An instance of DICOMSeries. + """ + + if len(series._sop_instances) <= 1: + series.depth_pixel_spacing = 1.0 # Default to 1, e.g. 
for CR image, similar to (Simple) ITK + return + + slice_indices_to_be_removed = [] + depth_pixel_spacing = 0.0 + last_slice_normal = [0.0, 0.0, 0.0] + + for slice_index, slice in enumerate(series._sop_instances): + distance = 0.0 + point = [0.0, 0.0, 0.0] + slice_normal = [0.0, 0.0, 0.0] + slice_position = None + cosines = None + + try: + image_orientation_patient_de = slice[0x0020, 0x0037] + if image_orientation_patient_de is not None: + image_orientation_patient = image_orientation_patient_de.value + cosines = image_orientation_patient + except KeyError: + pass + + try: + image_poisition_patient_de = slice[0x0020, 0x0032] + if image_poisition_patient_de is not None: + image_poisition_patient = image_poisition_patient_de.value + slice_position = image_poisition_patient + except KeyError: + pass + + distance = 0.0 + + if (cosines is not None) and (slice_position is not None): + slice_normal[0] = cosines[1] * cosines[5] - cosines[2] * cosines[4] + slice_normal[1] = cosines[2] * cosines[3] - cosines[0] * cosines[5] + slice_normal[2] = cosines[0] * cosines[4] - cosines[1] * cosines[3] + + last_slice_normal = copy.deepcopy(slice_normal) + + i = 0 + while i < 3: + point[i] = slice_normal[i] * slice_position[i] + i += 1 + + distance += point[0] + point[1] + point[2] + + series._sop_instances[slice_index].distance = distance + series._sop_instances[slice_index].first_pixel_on_slice_normal = point + else: + logging.debug(f"Slice index to remove: {slice_index}") + slice_indices_to_be_removed.append(slice_index) + + logging.debug(f"Total slices before removal (if applicable): {len(series._sop_instances)}") + + # iterate in reverse order to avoid affecting subsequent indices after a deletion + for sl_index in sorted(slice_indices_to_be_removed, reverse=True): + del series._sop_instances[sl_index] + logging.info(f"Removed slice index: {sl_index}") + + logging.debug(f"Total slices after removal (if applicable): {len(series._sop_instances)}") + + series._sop_instances = sorted(series._sop_instances, key=lambda s: s.distance) + series.depth_direction_cosine = copy.deepcopy(last_slice_normal) + + if len(series._sop_instances) > 1: + p1 = series._sop_instances[0].first_pixel_on_slice_normal + p2 = series._sop_instances[1].first_pixel_on_slice_normal + depth_pixel_spacing = ( + (p1[0] - p2[0]) * (p1[0] - p2[0]) + + (p1[1] - p2[1]) * (p1[1] - p2[1]) + + (p1[2] - p2[2]) * (p1[2] - p2[2]) + ) + depth_pixel_spacing = math.sqrt(depth_pixel_spacing) + series.depth_pixel_spacing = depth_pixel_spacing + + s_1 = series._sop_instances[0] + s_n = series._sop_instances[-1] + num_slices = len(series._sop_instances) + self.compute_affine_transform(s_1, s_n, num_slices, series) + + def compute_affine_transform(self, s_1, s_n, n, series): + """Computes the affine transform for this series. It does it in both DICOM Patient oriented + coordinate system as well as the pne preferred by NIFTI standard. Accordingly, the two attributes + dicom_affine_transform and nifti_affine_transform are stored in the series instance. + + The Image Orientation Patient contains two triplets, [rx ry rz cx cy cz], which encode + direction cosines of the row and column of an image slice. The Image Position Patient of the first slice in + a volume, [x1 y1 z1], is the x, y, z coordinates of the upper-left corner voxel of the slice. These two + parameters define the location of the slice in PCS. To determine the location of a volume, the Image + Position Patient of another slice is normally needed. 
In practice, we tend to use the position of the last + slice in a volume, [xn yn zn]. The voxel size within the slice plane, [vr vc], is stored in object Pixel Spacing. + + Args: + s_1: A first slice in the series. + s_n: A last slice in the series. + n: A number of slices in the series. + series: An instance of DICOMSeries. + """ + + m1 = np.arange(1, 17, dtype=float).reshape(4, 4) + m2 = np.arange(1, 17, dtype=float).reshape(4, 4) + + image_orientation_patient = None + try: + image_orientation_patient_de = s_1[0x0020, 0x0037] + if image_orientation_patient_de is not None: + image_orientation_patient = image_orientation_patient_de.value + except KeyError: + pass + rx = image_orientation_patient[0] + ry = image_orientation_patient[1] + rz = image_orientation_patient[2] + cx = image_orientation_patient[3] + cy = image_orientation_patient[4] + cz = image_orientation_patient[5] + + vr = 0.0 + vc = 0.0 + try: + pixel_spacing_de = s_1[0x0028, 0x0030] + if pixel_spacing_de is not None: + vr = pixel_spacing_de.value[0] + vc = pixel_spacing_de.value[1] + except KeyError: + pass + + x1 = 0.0 + y1 = 0.0 + z1 = 0.0 + + xn = 0.0 + yn = 0.0 + zn = 0.0 + + ip1 = None + ip2 = None + try: + ip1_de = s_1[0x0020, 0x0032] + ipn_de = s_n[0x0020, 0x0032] + ip1 = ip1_de.value + ipn = ipn_de.value + + except KeyError: + pass + + x1 = ip1[0] + y1 = ip1[1] + z1 = ip1[2] + + xn = ipn[0] + yn = ipn[1] + zn = ipn[2] + + m1[0, 0] = rx * vr + m1[0, 1] = cx * vc + m1[0, 2] = (xn - x1) / (n - 1) + m1[0, 3] = x1 + + m1[1, 0] = ry * vr + m1[1, 1] = cy * vc + m1[1, 2] = (yn - y1) / (n - 1) + m1[1, 3] = y1 + + m1[2, 0] = rz * vr + m1[2, 1] = cz * vc + m1[2, 2] = (zn - z1) / (n - 1) + m1[2, 3] = z1 + + m1[3, 0] = 0 + m1[3, 1] = 0 + m1[3, 2] = 0 + m1[3, 3] = 1 + + series.dicom_affine_transform = m1 + + m2[0, 0] = -rx * vr + m2[0, 1] = -cx * vc + m2[0, 2] = -(xn - x1) / (n - 1) + m2[0, 3] = -x1 + + m2[1, 0] = -ry * vr + m2[1, 1] = -cy * vc + m2[1, 2] = -(yn - y1) / (n - 1) + m2[1, 3] = -y1 + + m2[2, 0] = rz * vr + m2[2, 1] = cz * vc + m2[2, 2] = (zn - z1) / (n - 1) + m2[2, 3] = z1 + + m2[3, 0] = 0 + m2[3, 1] = 0 + m2[3, 2] = 0 + m2[3, 3] = 1 + + series.nifti_affine_transform = m2 + + def create_metadata(self, series) -> Dict: + """Collects all relevant metadata from the DICOM Series and creates a dictionary. + + Args: + series: An instance of DICOMSeries. + + Returns: + An instance of a dictionary containing metadata for the volumetric image. + """ + + # Set metadata with series properties that are not None. 
+ metadata = {} + if series: + metadata = self._get_instance_properties(series) + return metadata + + @staticmethod + def _get_instance_properties(obj: object, not_none: bool = True) -> Dict: + prop_dict = {} + if obj: + for attribute in [x for x in type(obj).__dict__ if isinstance(type(obj).__dict__[x], property)]: + attr_val = getattr(obj, attribute, None) + if not_none: + if attr_val is not None: + prop_dict[attribute] = attr_val + else: + prop_dict[attribute] = attr_val + + return prop_dict + + +def test(): + from pathlib import Path + + from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator + from monai.deploy.operators.dicom_series_selector_operator import DICOMSeriesSelectorOperator + + current_file_dir = Path(__file__).parent.resolve() + data_path = current_file_dir.joinpath("../../../inputs/spleen_ct/dcm").absolute() + + fragment = Fragment() + loader = DICOMDataLoaderOperator(fragment, name="loader_op") + series_selector = DICOMSeriesSelectorOperator(fragment, name="selector_op") + vol_op = DICOMSeriesToVolumeOperator(fragment, name="series_to_vol_op") + + study_list = loader.load_data_to_studies(data_path) + study_selected_series_list = series_selector.filter(None, study_list) + image = vol_op.convert_to_image(study_selected_series_list) + + print(f"Image NumPy array shape (index order DHW): {image.asnumpy().shape}") + for k, v in image.metadata().items(): + print(f"{(k)}: {(v)}") + + +if __name__ == "__main__": + test() diff --git a/platforms/aidoc/restful_app/env_settings_repo.sh b/platforms/aidoc/restful_app/env_settings_repo.sh new file mode 100755 index 00000000..aa506f2b --- /dev/null +++ b/platforms/aidoc/restful_app/env_settings_repo.sh @@ -0,0 +1,5 @@ +#!/bin/bash +export HOLOSCAN_INPUT_PATH="$(pwd)/../monai-deploy-app-sdk/inputs/spleen_ct_tcia" +export HOLOSCAN_MODEL_PATH="$(pwd)/../monai-deploy-app-sdk/models/spleen_ct" +export HOLOSCAN_OUTPUT_PATH="$(pwd)/output_spleen" +export HOLOSCAN_LOG_LEVEL=INFO \ No newline at end of file diff --git a/platforms/aidoc/restful_app/requirements.txt b/platforms/aidoc/restful_app/requirements.txt new file mode 100644 index 00000000..bd91f047 --- /dev/null +++ b/platforms/aidoc/restful_app/requirements.txt @@ -0,0 +1,14 @@ +monai-deploy-app-sdk>=3.0.0 +scikit-image>=0.17.2 +pydicom>=2.3.0 +highdicom>=0.18.2 +SimpleITK>=2.0.0 +Pillow>=8.0.0 +numpy-stl>=2.12.0 +trimesh>=3.8.11 +nibabel>=3.2.1 +torch>=1.12.0 +monai>=1.0.0 +Flask==2.2.2 +requests>=2.32 +Werkzeug==2.2.3 \ No newline at end of file diff --git a/platforms/aidoc/test_endpoints.sh b/platforms/aidoc/test_endpoints.sh new file mode 100644 index 00000000..381505ee --- /dev/null +++ b/platforms/aidoc/test_endpoints.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# This script tests the RESTful endpoints of the wrapper application. + +# Base URL of the web application +BASE_URL="http://127.0.0.1:5000" + +# Get the absolute path to the input and output directories +INPUT_DIR="$HOLOSCAN_INPUT_PATH" #"$(pwd)/inputs/spleen_ct_tcia" +OUTPUT_DIR="$HOLOSCAN_OUTPUT_PATH" #"$(pwd)/output_spleen" +CALLBACK_PORT=9005 +CALLBACK_URL="http://127.0.0.1:$CALLBACK_PORT/callback" + +# Function to print test headers +print_header() { + echo "" + echo "======================================================" + echo "$1" + echo "======================================================" +} + +# 1. 
Start a simple netcat listener to act as our callback server +print_header "Starting callback listener on port $CALLBACK_PORT" +# Listen for one request, then exit immediately +nc -l $CALLBACK_PORT -q 1 > callback_output.txt & +NC_PID=$! +echo "Netcat listener started with PID $NC_PID" +sleep 2 # Give it a moment to start up + +# 2. Check the initial status (should be IDLE) +print_header "Checking initial status (should be IDLE)" +curl -X GET "$BASE_URL/status" +echo "" + +# 3. Send a request to process data +print_header "Sending request to process data" +curl -X POST "$BASE_URL/process" \ + -H "Content-Type: application/json" \ + -d '{ + "input_folder": "'"$INPUT_DIR"'", + "output_folder": "'"$OUTPUT_DIR"'", + "callback_url": "'"$CALLBACK_URL"'" + }' +echo "" + +# 4. Check the status immediately after (should be BUSY) +print_header "Checking status immediately after request (should be BUSY)" +sleep 1 # Give the server a moment to switch state +curl -X GET "$BASE_URL/status" +echo "" + +# 5. Wait for processing to complete +print_header "Waiting for processing to complete..." +wait $NC_PID +echo "Netcat listener has received the callback and exited." + + +# 6. Display the callback data received +print_header "Callback data received" +if [ -f "callback_output.txt" ]; then + cat callback_output.txt + # The actual HTTP headers and body are captured. We'll just show the JSON part. + print_header "Callback message content in JSON:" + grep -o '{.*}' callback_output.txt + rm callback_output.txt +else + echo "No callback output file found." +fi +echo "" + +# 7. Check the final status (should be IDLE again) +print_header "Checking final status (should be IDLE)" +# Give a second for the status to be updated post-callback +sleep 1 +curl -X GET "$BASE_URL/status" +echo "" + +echo "Test script finished." From 11f8184f1876220b6d75b0298c5e618e470671de Mon Sep 17 00:00:00 2001 From: M Q Date: Mon, 8 Sep 2025 22:48:19 -0700 Subject: [PATCH 02/21] Removed operator local fix and use newly released MONAI App SDK v3.5.1 release Signed-off-by: M Q --- .../restful_app/ai_spleen_seg_app/app.py | 3 +- .../ai_spleen_seg_app/requirements.txt | 2 +- .../dicom_series_to_volume_operator_local.py | 454 ------------------ platforms/aidoc/restful_app/requirements.txt | 2 +- 4 files changed, 3 insertions(+), 458 deletions(-) delete mode 100644 platforms/aidoc/restful_app/dicom_series_to_volume_operator_local.py diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py index 488d00cc..5361c9b2 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py @@ -12,8 +12,6 @@ import logging from pathlib import Path -from dicom_series_to_volume_operator_local import DICOMSeriesToVolumeOperator - # Required for setting SegmentDescription attributes. Direct import as this is not part of App SDK package. from pydicom.sr.codedict import codes @@ -24,6 +22,7 @@ from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator from monai.deploy.operators.dicom_seg_writer_operator import DICOMSegmentationWriterOperator, SegmentDescription from monai.deploy.operators.dicom_series_selector_operator import DICOMSeriesSelectorOperator +from monai.deploy.operators.dicom_series_to_volume_operator import DICOMSeriesToVolumeOperator # Use a local fixed version. 
from monai.deploy.operators.dicom_series_to_volume_operator import DICOMSeriesToVolumeOperator from monai.deploy.operators.monai_bundle_inference_operator import ( diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt b/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt index d60890dd..210906dc 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt @@ -1,4 +1,4 @@ -monai-deploy-app-sdk>=3.0.0 +monai-deploy-app-sdk>=3.1.0 scikit-image>=0.17.2 pydicom>=2.3.0 highdicom>=0.18.2 diff --git a/platforms/aidoc/restful_app/dicom_series_to_volume_operator_local.py b/platforms/aidoc/restful_app/dicom_series_to_volume_operator_local.py deleted file mode 100644 index 30812768..00000000 --- a/platforms/aidoc/restful_app/dicom_series_to_volume_operator_local.py +++ /dev/null @@ -1,454 +0,0 @@ -# Copyright 2021-2025 MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import logging -import math -from typing import Dict, List, Union - -import numpy as np - -from monai.deploy.utils.importutil import optional_import - -apply_rescale, _ = optional_import("pydicom.pixels", name="apply_rescale") - -from monai.deploy.core import ConditionType, Fragment, Operator, OperatorSpec -from monai.deploy.core.domain.dicom_series_selection import StudySelectedSeries -from monai.deploy.core.domain.image import Image - - -class DICOMSeriesToVolumeOperator(Operator): - """This operator converts an instance of DICOMSeries into an Image object. - - The loaded Image Object can be used for further processing via other operators. - The data array will be a 3D image NumPy array with index order of `DHW`. - Channel is limited to 1 as of now, and `C` is absent in the NumPy array. - - Named Input: - study_selected_series_list: List of StudySelectedSeries. - Named Output: - image: Image object. - """ - - def __init__(self, fragment: Fragment, *args, **kwargs): - """Create an instance for a containing application object. - - Args: - fragment (Fragment): An instance of the Application class which is derived from Fragment. - """ - - self.input_name_series = "study_selected_series_list" - self.output_name_image = "image" - # Need to call the base class constructor last - super().__init__(fragment, *args, **kwargs) - - def setup(self, spec: OperatorSpec): - spec.input(self.input_name_series) - spec.output(self.output_name_image).condition(ConditionType.NONE) - - def compute(self, op_input, op_output, context): - """Performs computation for this operator and handles I/O.""" - - study_selected_series_list = op_input.receive(self.input_name_series) - - # TODO: need to get a solution to correctly annotate and consume multiple image outputs. - # For now, only supports the one and only one selected series. 
- image = self.convert_to_image(study_selected_series_list) - op_output.emit(image, self.output_name_image) - - def convert_to_image(self, study_selected_series_list: List[StudySelectedSeries]) -> Union[Image, None]: - """Extracts the pixel data from a DICOM Series and other attributes to create an Image object""" - # For now, only supports the one and only one selected series. - if not study_selected_series_list or len(study_selected_series_list) < 1: - raise ValueError("Missing expected input 'study_selected_series_list'") - - for study_selected_series in study_selected_series_list: - if not isinstance(study_selected_series, StudySelectedSeries): - raise ValueError("Element in input is not expected type, 'StudySelectedSeries'.") - selected_series = study_selected_series.selected_series[0] - dicom_series = selected_series.series - selection_name = selected_series.selection_name - self.prepare_series(dicom_series) - metadata = self.create_metadata(dicom_series) - - # Add to the metadata the DICOMStudy properties and selection metadata - metadata.update(self._get_instance_properties(study_selected_series.study)) - selection_metadata = {"selection_name": selection_name} - metadata.update(selection_metadata) - - voxel_data = self.generate_voxel_data(dicom_series) - image = self.create_volumetric_image(voxel_data, metadata) - - # Now it is time to assign the converted image to the SelectedSeries obj - selected_series.image = image - - # Break out since limited to one series/image for now - break - - # TODO: This needs to be updated once allowed to output multiple Image objects - return study_selected_series_list[0].selected_series[0].image - - def generate_voxel_data(self, series): - """Applies rescale slope and rescale intercept to the pixels. - - Supports monochrome image only for now. Photometric Interpretation attribute, - tag (0028,0004), is considered. Both MONOCHROME2 (IDENTITY) and MONOCHROME1 (INVERSE) - result in an output image where The minimum sample value is intended to be displayed as black. - - Args: - series: DICOM Series for which the pixel data needs to be extracted. - - Returns: - A 3D numpy tensor representing the volumetric data. - """ - slices = series.get_sop_instances() - # The sop_instance get_pixel_array() returns a 2D NumPy array with index order - # of `HW`. The pixel array of all instances will be stacked along the first axis, - # so the final 3D NumPy array will have index order of [DHW]. This is consistent - # with the NumPy array returned from the ITK GetArrayViewFromImage on the image - # loaded from the same DICOM series. - vol_data = np.stack([s.get_pixel_array() for s in slices], axis=0) - - # Use pydicom utility to apply a modality lookup table or rescale operator to the pixel array. - # The pydicom Dataset is required which can be obtained from the first slice's native SOP instance. - # If Modality LUT is present the return array is of np.uint8 or np.uint16, and if Rescale - # Intercept and Rescale Slope are present, np.float64. - # If the pixel array is already in the correct type, the return array is the same as the input array. 
- try: - native_sop = slices[0].get_native_sop_instance() - vol_data = apply_rescale(vol_data, native_sop) - except Exception as e: - logging.error(f"Failed to apply rescale to DICOM volume: {e}") - raise RuntimeError("Failed to apply rescale to DICOM volume.") from e - - # For now we support monochrome image only, for which DICOM Photometric Interpretation - # (0028,0004) has defined terms, MONOCHROME1 and MONOCHROME2, with the former being: - # Pixel data represent a single monochrome image plane. The minimum sample value is - # intended to be displayed as white after any VOI gray scale transformations have been - # performed. See PS3.4. This value may be used only when Samples per Pixel (0028,0002) - # has a value of 1. May be used for pixel data in a Native (uncompressed) or Encapsulated - # (compressed) format; see Section 8.2 in PS3.5. - # and for the latter "The minimum sample value is intended to be displayed as black" - # - # In this function, pixel data will be interpreted as if MONOCHROME2, hence inverting - # MONOCHROME1 for the final voxel data. - - photometric_interpretation = ( - slices[0].get_native_sop_instance().get("PhotometricInterpretation", "").strip().upper() - ) - presentation_lut_shape = slices[0].get_native_sop_instance().get("PresentationLUTShape", "").strip().upper() - - if not photometric_interpretation: - logging.warning("Cannot get value of attribute Photometric Interpretation.") - - if photometric_interpretation != "MONOCHROME2": - if photometric_interpretation == "MONOCHROME1" or presentation_lut_shape == "INVERSE": - logging.debug("Applying INVERSE transformation as required for MONOCHROME1 image.") - vol_data = np.amax(vol_data) - vol_data - else: - raise ValueError( - f"Cannot process pixel data with Photometric Interpretation of {photometric_interpretation}." - ) - - # NumPy's np.can_cast function, as of version 2.0, no longer supports Python scalars directly and - # does not apply value-based logic for 0-D arrays and NumPy scalars. - # The following can_cast calls are expecting the array is already of the correct type. - if vol_data.dtype == np.uint8: - logging.info("Rescaled pixel data is of type uint8.") - elif np.can_cast(vol_data, np.uint16, casting="safe"): - logging.info("Casting to uint16") - vol_data = vol_data.astype(dtype=np.uint16, casting="safe") - elif np.can_cast(vol_data, np.float32, casting="safe"): - logging.info("Casting to float32") - vol_data = vol_data.astype(dtype=np.float32, casting="safe") - else: - logging.info("Rescaled pixel data remains as of type float64.") - - return vol_data - - def create_volumetric_image(self, vox_data, metadata): - """Creates an instance of 3D image. - - Args: - vox_data: A numpy array representing the volumetric data. - metadata: DICOM attributes in a dictionary. - - Returns: - An instance of Image object. - """ - image = Image(vox_data, metadata) - return image - - def prepare_series(self, series): - """Computes the slice normal for each slice and then projects the first voxel of each - slice on that slice normal. - - It computes the distance of that point from the origin of the patient coordinate system along the slice normal. - It orders the slices in the series according to that distance. - - Args: - series: An instance of DICOMSeries. - """ - - if len(series._sop_instances) <= 1: - series.depth_pixel_spacing = 1.0 # Default to 1, e.g. 
for CR image, similar to (Simple) ITK - return - - slice_indices_to_be_removed = [] - depth_pixel_spacing = 0.0 - last_slice_normal = [0.0, 0.0, 0.0] - - for slice_index, slice in enumerate(series._sop_instances): - distance = 0.0 - point = [0.0, 0.0, 0.0] - slice_normal = [0.0, 0.0, 0.0] - slice_position = None - cosines = None - - try: - image_orientation_patient_de = slice[0x0020, 0x0037] - if image_orientation_patient_de is not None: - image_orientation_patient = image_orientation_patient_de.value - cosines = image_orientation_patient - except KeyError: - pass - - try: - image_poisition_patient_de = slice[0x0020, 0x0032] - if image_poisition_patient_de is not None: - image_poisition_patient = image_poisition_patient_de.value - slice_position = image_poisition_patient - except KeyError: - pass - - distance = 0.0 - - if (cosines is not None) and (slice_position is not None): - slice_normal[0] = cosines[1] * cosines[5] - cosines[2] * cosines[4] - slice_normal[1] = cosines[2] * cosines[3] - cosines[0] * cosines[5] - slice_normal[2] = cosines[0] * cosines[4] - cosines[1] * cosines[3] - - last_slice_normal = copy.deepcopy(slice_normal) - - i = 0 - while i < 3: - point[i] = slice_normal[i] * slice_position[i] - i += 1 - - distance += point[0] + point[1] + point[2] - - series._sop_instances[slice_index].distance = distance - series._sop_instances[slice_index].first_pixel_on_slice_normal = point - else: - logging.debug(f"Slice index to remove: {slice_index}") - slice_indices_to_be_removed.append(slice_index) - - logging.debug(f"Total slices before removal (if applicable): {len(series._sop_instances)}") - - # iterate in reverse order to avoid affecting subsequent indices after a deletion - for sl_index in sorted(slice_indices_to_be_removed, reverse=True): - del series._sop_instances[sl_index] - logging.info(f"Removed slice index: {sl_index}") - - logging.debug(f"Total slices after removal (if applicable): {len(series._sop_instances)}") - - series._sop_instances = sorted(series._sop_instances, key=lambda s: s.distance) - series.depth_direction_cosine = copy.deepcopy(last_slice_normal) - - if len(series._sop_instances) > 1: - p1 = series._sop_instances[0].first_pixel_on_slice_normal - p2 = series._sop_instances[1].first_pixel_on_slice_normal - depth_pixel_spacing = ( - (p1[0] - p2[0]) * (p1[0] - p2[0]) - + (p1[1] - p2[1]) * (p1[1] - p2[1]) - + (p1[2] - p2[2]) * (p1[2] - p2[2]) - ) - depth_pixel_spacing = math.sqrt(depth_pixel_spacing) - series.depth_pixel_spacing = depth_pixel_spacing - - s_1 = series._sop_instances[0] - s_n = series._sop_instances[-1] - num_slices = len(series._sop_instances) - self.compute_affine_transform(s_1, s_n, num_slices, series) - - def compute_affine_transform(self, s_1, s_n, n, series): - """Computes the affine transform for this series. It does it in both DICOM Patient oriented - coordinate system as well as the pne preferred by NIFTI standard. Accordingly, the two attributes - dicom_affine_transform and nifti_affine_transform are stored in the series instance. - - The Image Orientation Patient contains two triplets, [rx ry rz cx cy cz], which encode - direction cosines of the row and column of an image slice. The Image Position Patient of the first slice in - a volume, [x1 y1 z1], is the x, y, z coordinates of the upper-left corner voxel of the slice. These two - parameters define the location of the slice in PCS. To determine the location of a volume, the Image - Position Patient of another slice is normally needed. 
In practice, we tend to use the position of the last - slice in a volume, [xn yn zn]. The voxel size within the slice plane, [vr vc], is stored in object Pixel Spacing. - - Args: - s_1: A first slice in the series. - s_n: A last slice in the series. - n: A number of slices in the series. - series: An instance of DICOMSeries. - """ - - m1 = np.arange(1, 17, dtype=float).reshape(4, 4) - m2 = np.arange(1, 17, dtype=float).reshape(4, 4) - - image_orientation_patient = None - try: - image_orientation_patient_de = s_1[0x0020, 0x0037] - if image_orientation_patient_de is not None: - image_orientation_patient = image_orientation_patient_de.value - except KeyError: - pass - rx = image_orientation_patient[0] - ry = image_orientation_patient[1] - rz = image_orientation_patient[2] - cx = image_orientation_patient[3] - cy = image_orientation_patient[4] - cz = image_orientation_patient[5] - - vr = 0.0 - vc = 0.0 - try: - pixel_spacing_de = s_1[0x0028, 0x0030] - if pixel_spacing_de is not None: - vr = pixel_spacing_de.value[0] - vc = pixel_spacing_de.value[1] - except KeyError: - pass - - x1 = 0.0 - y1 = 0.0 - z1 = 0.0 - - xn = 0.0 - yn = 0.0 - zn = 0.0 - - ip1 = None - ip2 = None - try: - ip1_de = s_1[0x0020, 0x0032] - ipn_de = s_n[0x0020, 0x0032] - ip1 = ip1_de.value - ipn = ipn_de.value - - except KeyError: - pass - - x1 = ip1[0] - y1 = ip1[1] - z1 = ip1[2] - - xn = ipn[0] - yn = ipn[1] - zn = ipn[2] - - m1[0, 0] = rx * vr - m1[0, 1] = cx * vc - m1[0, 2] = (xn - x1) / (n - 1) - m1[0, 3] = x1 - - m1[1, 0] = ry * vr - m1[1, 1] = cy * vc - m1[1, 2] = (yn - y1) / (n - 1) - m1[1, 3] = y1 - - m1[2, 0] = rz * vr - m1[2, 1] = cz * vc - m1[2, 2] = (zn - z1) / (n - 1) - m1[2, 3] = z1 - - m1[3, 0] = 0 - m1[3, 1] = 0 - m1[3, 2] = 0 - m1[3, 3] = 1 - - series.dicom_affine_transform = m1 - - m2[0, 0] = -rx * vr - m2[0, 1] = -cx * vc - m2[0, 2] = -(xn - x1) / (n - 1) - m2[0, 3] = -x1 - - m2[1, 0] = -ry * vr - m2[1, 1] = -cy * vc - m2[1, 2] = -(yn - y1) / (n - 1) - m2[1, 3] = -y1 - - m2[2, 0] = rz * vr - m2[2, 1] = cz * vc - m2[2, 2] = (zn - z1) / (n - 1) - m2[2, 3] = z1 - - m2[3, 0] = 0 - m2[3, 1] = 0 - m2[3, 2] = 0 - m2[3, 3] = 1 - - series.nifti_affine_transform = m2 - - def create_metadata(self, series) -> Dict: - """Collects all relevant metadata from the DICOM Series and creates a dictionary. - - Args: - series: An instance of DICOMSeries. - - Returns: - An instance of a dictionary containing metadata for the volumetric image. - """ - - # Set metadata with series properties that are not None. 
- metadata = {} - if series: - metadata = self._get_instance_properties(series) - return metadata - - @staticmethod - def _get_instance_properties(obj: object, not_none: bool = True) -> Dict: - prop_dict = {} - if obj: - for attribute in [x for x in type(obj).__dict__ if isinstance(type(obj).__dict__[x], property)]: - attr_val = getattr(obj, attribute, None) - if not_none: - if attr_val is not None: - prop_dict[attribute] = attr_val - else: - prop_dict[attribute] = attr_val - - return prop_dict - - -def test(): - from pathlib import Path - - from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator - from monai.deploy.operators.dicom_series_selector_operator import DICOMSeriesSelectorOperator - - current_file_dir = Path(__file__).parent.resolve() - data_path = current_file_dir.joinpath("../../../inputs/spleen_ct/dcm").absolute() - - fragment = Fragment() - loader = DICOMDataLoaderOperator(fragment, name="loader_op") - series_selector = DICOMSeriesSelectorOperator(fragment, name="selector_op") - vol_op = DICOMSeriesToVolumeOperator(fragment, name="series_to_vol_op") - - study_list = loader.load_data_to_studies(data_path) - study_selected_series_list = series_selector.filter(None, study_list) - image = vol_op.convert_to_image(study_selected_series_list) - - print(f"Image NumPy array shape (index order DHW): {image.asnumpy().shape}") - for k, v in image.metadata().items(): - print(f"{(k)}: {(v)}") - - -if __name__ == "__main__": - test() diff --git a/platforms/aidoc/restful_app/requirements.txt b/platforms/aidoc/restful_app/requirements.txt index bd91f047..4970ddd5 100644 --- a/platforms/aidoc/restful_app/requirements.txt +++ b/platforms/aidoc/restful_app/requirements.txt @@ -1,4 +1,4 @@ -monai-deploy-app-sdk>=3.0.0 +monai-deploy-app-sdk>=3.1.0 scikit-image>=0.17.2 pydicom>=2.3.0 highdicom>=0.18.2 From cdc8b0e196d5858b74808c7d064915e2ba25c4b0 Mon Sep 17 00:00:00 2001 From: M Q Date: Wed, 10 Sep 2025 15:59:23 -0700 Subject: [PATCH 03/21] Removed local copy of operator that had a temp fix Signed-off-by: M Q --- .../restful_app/ai_spleen_seg_app/__init__.py | 2 +- .../restful_app/ai_spleen_seg_app/__main__.py | 2 +- .../restful_app/ai_spleen_seg_app/app.py | 4 ++-- .../restful_app/ai_spleen_seg_app/app.yaml | 2 +- .../ai_spleen_seg_app/reporter_operator.py | 3 ++- platforms/aidoc/restful_app/app.py | 22 +++++++++++++------ 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py index 526cee59..e8c9b358 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 MONAI Consortium +# Copyright 2021-2025 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py index 2d67f364..27254d89 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 MONAI Consortium +# Copyright 2021-2025 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py index 5361c9b2..5d8a2a98 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py @@ -1,4 +1,4 @@ -# Copyright 2021-2023 MONAI Consortium +# Copyright 2021-2025 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -14,6 +14,7 @@ # Required for setting SegmentDescription attributes. Direct import as this is not part of App SDK package. from pydicom.sr.codedict import codes +from reporter_operator import ExecutionStatusReporterOperator from monai.deploy.conditions import CountCondition from monai.deploy.core import AppContext, Application @@ -31,7 +32,6 @@ MonaiBundleInferenceOperator, ) from monai.deploy.operators.stl_conversion_operator import STLConversionOperator -from reporter_operator import ExecutionStatusReporterOperator # @resource(cpu=1, gpu=1, memory="7Gi") diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.yaml b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.yaml index 390ce7ee..cbe95c9e 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.yaml +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.yaml @@ -1,5 +1,5 @@ %YAML 1.2 -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 MONAI. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 MONAI. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py index 365ade75..b62a4b86 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py @@ -10,6 +10,7 @@ # limitations under the License. import logging + from monai.deploy.core import ( Application, ConditionType, @@ -18,8 +19,8 @@ InputContext, IOType, Operator, - OutputContext, OperatorSpec, + OutputContext, ) diff --git a/platforms/aidoc/restful_app/app.py b/platforms/aidoc/restful_app/app.py index 4361a5e6..36f57e89 100644 --- a/platforms/aidoc/restful_app/app.py +++ b/platforms/aidoc/restful_app/app.py @@ -1,10 +1,21 @@ +# Copyright 2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse import importlib import logging import os import sys import threading from http import HTTPStatus -import argparse import requests from flask import Flask, jsonify, request @@ -65,8 +76,7 @@ def app_status_callback(summary): os.environ["MONAI_INPUTPATH"] = input_folder os.environ["MONAI_OUTPUTPATH"] = output_folder os.environ["HOLOSCAN_INPUT_PATH"] = input_folder # For Holoscan-based apps - os.environ["HOLOSCAN_OUTPUT_PATH"] = output_folder # For Holoscan-based apps - + os.environ["HOLOSCAN_OUTPUT_PATH"] = output_folder # For Holoscan-based apps # Dynamically import the application class from the specified module. logging.info(f"Loading application: {APP_MODULE_NAME}.{APP_CLASS_NAME}") @@ -110,9 +120,7 @@ def process(): callback_url = data.get("callback_url") # Callback URL is optional # Start the processing in a background thread. - thread = threading.Thread( - target=run_processing, args=(input_folder, output_folder, callback_url) - ) + thread = threading.Thread(target=run_processing, args=(input_folder, output_folder, callback_url)) thread.start() return jsonify({"message": "Processing started."}), HTTPStatus.ACCEPTED @@ -136,4 +144,4 @@ def process(): args = parser.parse_args() host = args.host or os.environ.get("FLASK_HOST", "0.0.0.0") port = args.port or int(os.environ.get("FLASK_PORT", 5000)) - app.run(host=host, port=port) \ No newline at end of file + app.run(host=host, port=port) From 3164908b3d84c72146c74245800dc3799f058990 Mon Sep 17 00:00:00 2001 From: M Q Date: Thu, 18 Sep 2025 18:35:52 -0700 Subject: [PATCH 04/21] Add proper callback message Signed-off-by: M Q --- platforms/aidoc/README.md | 4 +- .../restful_app/ai_spleen_seg_app/__init__.py | 4 + .../restful_app/ai_spleen_seg_app/__main__.py | 2 +- .../restful_app/ai_spleen_seg_app/app.py | 88 ++++++++++++++++--- .../ai_spleen_seg_app/reporter_operator.py | 65 -------------- platforms/aidoc/restful_app/app.py | 27 +++--- .../aidoc/restful_app/env_settings_repo.sh | 8 +- platforms/aidoc/restful_app/requirements.txt | 8 +- 8 files changed, 108 insertions(+), 98 deletions(-) delete mode 100644 platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py diff --git a/platforms/aidoc/README.md b/platforms/aidoc/README.md index 8c1d0632..8e44359e 100644 --- a/platforms/aidoc/README.md +++ b/platforms/aidoc/README.md @@ -66,7 +66,7 @@ Open another console window and change directory to the same as this file. Set the environment vars so that the test script can get the input DCM and write the callback contents. Also, once the Restful app completes each processing, the Spleen Seg app's output will also be saved in -the output folder speficied below (the script passes the output folder via the Rest API). +the output folder specified below (the script passes the output folder via the Rest API). ``` export HOLOSCAN_INPUT_PATH=dcm @@ -79,7 +79,7 @@ Run the test script, and examine its console output. 
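# Invokes the service's /process endpoint and serves a local callback endpoint to capture the completion message.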
source test_endpoints.sh ``` -Once the script completes, examine the `output` folder, which should conatain the following (dcm file +Once the script completes, examine the `output` folder, which should contain the following (dcm file name will be different) ``` diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py index e8c9b358..0ba7a8fc 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/__init__.py @@ -12,7 +12,11 @@ import os import sys +from .app import AISpleenSegApp + _current_dir = os.path.abspath(os.path.dirname(__file__)) if sys.path and os.path.abspath(sys.path[0]) != _current_dir: sys.path.insert(0, _current_dir) del _current_dir + +__all__ = ["AISpleenSegApp"] diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py index 27254d89..09e42b00 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/__main__.py @@ -11,7 +11,7 @@ import logging -from app import AISpleenSegApp +from .app import AISpleenSegApp if __name__ == "__main__": logging.info(f"Begin {__name__}") diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py index 5d8a2a98..35965213 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py @@ -9,12 +9,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import logging +import os from pathlib import Path +from typing import List, Union # Required for setting SegmentDescription attributes. Direct import as this is not part of App SDK package. from pydicom.sr.codedict import codes -from reporter_operator import ExecutionStatusReporterOperator from monai.deploy.conditions import CountCondition from monai.deploy.core import AppContext, Application @@ -33,10 +35,15 @@ ) from monai.deploy.operators.stl_conversion_operator import STLConversionOperator +from .results_message import ( + AggregatedResults, + AlgorithmClass, + DetailedResult, + MeasurementResult, + Results, +) + -# @resource(cpu=1, gpu=1, memory="7Gi") -# pip_packages can be a string that is a path(str) to requirements.txt file or a list of packages. -# The monai pkg is not required by this class, instead by the included operators. class AISpleenSegApp(Application): """Demonstrates inference with built-in MONAI Bundle inference operator with DICOM files as input/output @@ -57,13 +64,68 @@ def __init__(self, *args, status_callback=None, **kwargs): """Creates an application instance.""" self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__)) self._status_callback = status_callback + self._app_input_path = None # to be set in compose + self._app_output_path = None # to be set in compose super().__init__(*args, **kwargs) + def _get_files_in_folder(self, folder_path: Union[str, Path]) -> List[str]: + """Traverses a folder and returns a list of full paths of all files. + + Args: + folder_path (Union[str, Path]): The path to the folder to traverse. + + Returns: + List[str]: A list of absolute paths to the files in the folder. 
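+
+        Example (illustrative):
+            ["/abs/out/seg.dcm", "/abs/out/stl/spleen.stl"]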
+ """ + if not os.path.isdir(folder_path): + self._logger.warning(f"Output folder '{folder_path}' not found, returning empty file list.") + return [] + + file_paths = [] + for root, _, files in os.walk(folder_path): + for file in files: + full_path = os.path.abspath(os.path.join(root, file)) + file_paths.append(full_path) + return file_paths + def run(self, *args, **kwargs): # This method calls the base class to run. Can be omitted if simply calling through. self._logger.info(f"Begin {self.run.__name__}") - # The try...except block is removed as the reporter operator will handle status reporting. - super().run(*args, **kwargs) + try: + super().run(*args, **kwargs) + + if self._status_callback: + # Create the results object using the Pydantic models + ai_results = Results( + aggregated_results=AggregatedResults( + name="Spleen Segmentation", + algorithm_class={AlgorithmClass.MEASUREMENT}, + ), + detailed_results={ + "Spleen Segmentation": DetailedResult( + measurement=MeasurementResult( + measurements_text="Spleen segmentation completed successfully.", + ) + ) + }, + ) + + output_files = self._get_files_in_folder(self._app_output_path) + + callback_msg_dict = { + "run_success": True, + "output_files": output_files, + "error_message": None, + "error_code": None, + "result": ai_results.model_dump_json(), + } + self._status_callback(json.dumps(callback_msg_dict)) + + except Exception as e: + self._logger.error(f"Error in {self.run.__name__}: {e}") + # Let the caller to handle and report the error + raise e + self._logger.info(f"End {self.run.__name__}") def compose(self): @@ -73,12 +135,12 @@ def compose(self): # Use Commandline options over environment variables to init context. app_context: AppContext = Application.init_app_context(self.argv) - app_input_path = Path(app_context.input_path) - app_output_path = Path(app_context.output_path) + self._app_input_path = Path(app_context.input_path) + self._app_output_path = Path(app_context.output_path) # Create the custom operator(s) as well as SDK built-in operator(s). study_loader_op = DICOMDataLoaderOperator( - self, CountCondition(self, 1), input_folder=app_input_path, name="study_loader_op" + self, CountCondition(self, 1), input_folder=self._app_input_path, name="study_loader_op" ) series_selector_op = DICOMSeriesSelectorOperator(self, rules=Sample_Rules_Text, name="series_selector_op") series_to_vol_op = DICOMSeriesToVolumeOperator(self, name="series_to_vol_op") @@ -122,11 +184,11 @@ def compose(self): self, segment_descriptions=segment_descriptions, custom_tags=custom_tags, - output_folder=app_output_path, + output_folder=self._app_output_path, name="dicom_seg_writer", ) - reporter_op = ExecutionStatusReporterOperator(self, status_callback=self._status_callback) + # reporter_op = ExecutionStatusReporterOperator(self, status_callback=self._status_callback) # Create the processing pipeline, by specifying the source and destination operators, and # ensuring the output from the former matches the input of the latter, in both name and type. @@ -143,13 +205,13 @@ def compose(self): # Create the surface mesh STL conversion operator and add it to the app execution flow, if needed, by # uncommenting the following couple lines. 
stl_conversion_op = STLConversionOperator( - self, output_file=app_output_path.joinpath("stl/spleen.stl"), name="stl_conversion_op" + self, output_file=self._app_output_path.joinpath("stl/spleen.stl"), name="stl_conversion_op" ) self.add_flow(bundle_spleen_seg_op, stl_conversion_op, {("pred", "image")}) # Connect the reporter operator to the end of the pipeline. # It will be triggered after the DICOM SEG file is written. - self.add_flow(stl_conversion_op, reporter_op, {("stl_bytes", "data")}) + # self.add_flow(stl_conversion_op, reporter_op, {("stl_bytes", "data")}) logging.info(f"End {self.compose.__name__}") diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py deleted file mode 100644 index b62a4b86..00000000 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/reporter_operator.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2025 MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from monai.deploy.core import ( - Application, - ConditionType, - ExecutionContext, - Fragment, - InputContext, - IOType, - Operator, - OperatorSpec, - OutputContext, -) - - -class ExecutionStatusReporterOperator(Operator): - """ - This operator reports the execution status of the application via a callback. - It is intended to be the last operator in the application's workflow. - """ - - def __init__(self, fragment: Fragment, *args, status_callback, **kwargs): - """ - Args: - fragment (Fragment): An instance of the Application class. - status_callback (callable): The callback function to invoke with the status. - """ - self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__)) - self._status_callback = status_callback - super().__init__(fragment, *args, **kwargs) - - def setup(self, spec: OperatorSpec): - spec.input("data") - spec.output("data").condition(ConditionType.NONE) - - def compute(self, op_input, op_output, context): - """ - Receives data from the upstream operator and invokes the status callback. - """ - # For now, we are not doing anything with the input data or collecting logs. - # We will just report success. - # In the future, this is where log collection and summary generation would happen. - try: - # In a real implementation, you would gather data here. 
- op_input.receive("data") - summary = {"status": "Success", "message": "Application completed successfully."} - if self._status_callback: - self._status_callback(summary) - op_output.emit(summary, "data") - except Exception as e: - self._logger.error(f"Error in status reporter: {e}") - if self._status_callback: - error_summary = {"status": "Failure", "message": f"Application failed with error: {e}"} - self._status_callback(error_summary) diff --git a/platforms/aidoc/restful_app/app.py b/platforms/aidoc/restful_app/app.py index 36f57e89..11fb3f17 100644 --- a/platforms/aidoc/restful_app/app.py +++ b/platforms/aidoc/restful_app/app.py @@ -11,6 +11,7 @@ import argparse import importlib +import json import logging import os import sys @@ -23,11 +24,12 @@ # The MONAI Deploy application to be wrapped. # This can be changed to any other application in the repository. # Provide the module path and the class name of the application. -APP_MODULE_NAME = "ai_spleen_seg_app.app" + +APP_MODULE_NAME = "ai_spleen_seg_app" APP_CLASS_NAME = "AISpleenSegApp" # Flask application setup -app = Flask(__name__) +restful_app = Flask(__name__) logging.basicConfig(stream=sys.stdout, level=logging.INFO) # Global state to track processing status. A lock is used for thread safety. @@ -54,7 +56,7 @@ def run_processing(input_folder, output_folder, callback_url): """ # Define the callback function that the MONAI Deploy app will call. - def app_status_callback(summary): + def app_status_callback(summary: str): """Callback function to handle the final status from the application.""" logging.info(f"Received status from application: {summary}") if callback_url: @@ -62,7 +64,7 @@ def app_status_callback(summary): logging.info(f"Sending final status callback to {callback_url}") # Here you could map the summary to the expected format of the callback. # For now, we'll just forward the summary. - requests.post(callback_url, json=summary, timeout=5) + requests.post(callback_url, data=summary, timeout=5) logging.info("Sent final status callback.") except Exception as e: logging.error(f"Failed to send callback to {callback_url}: {e}") @@ -84,28 +86,33 @@ def app_status_callback(summary): app_class = getattr(module, APP_CLASS_NAME) monai_app = app_class(status_callback=app_status_callback) - # Run the MONAI Deploy application. + # Run the MONAI Deploy application which calls the callback if successful. logging.info("Running the MONAI Deploy application.") monai_app.run() logging.info("Processing completed successfully.") except Exception as e: logging.error(f"An error occurred during processing: {e}") - # If the app fails to even start, we need to report a failure. - app_status_callback({"status": "Failure", "message": f"Application failed to run: {e}"}) + # If the app fails, we need to handle it here and report a failure. 
+ callback_msg = { + "run_success": False, + "error_message": f"Error during processing: {e}", + "error_code": 500, + } + app_status_callback(json.dumps(callback_msg)) finally: set_processing_status("IDLE") logging.info("Processor is now IDLE.") -@app.route("/status", methods=["GET"]) +@restful_app.route("/status", methods=["GET"]) def status(): """Endpoint to check the current processing status.""" return jsonify({"status": get_processing_status()}) -@app.route("/process", methods=["POST"]) +@restful_app.route("/process", methods=["POST"]) def process(): """Endpoint to start a new processing job.""" if get_processing_status() == "BUSY": @@ -144,4 +151,4 @@ def process(): args = parser.parse_args() host = args.host or os.environ.get("FLASK_HOST", "0.0.0.0") port = args.port or int(os.environ.get("FLASK_PORT", 5000)) - app.run(host=host, port=port) + restful_app.run(host=host, port=port) diff --git a/platforms/aidoc/restful_app/env_settings_repo.sh b/platforms/aidoc/restful_app/env_settings_repo.sh index aa506f2b..a1aa932c 100755 --- a/platforms/aidoc/restful_app/env_settings_repo.sh +++ b/platforms/aidoc/restful_app/env_settings_repo.sh @@ -1,5 +1,5 @@ #!/bin/bash -export HOLOSCAN_INPUT_PATH="$(pwd)/../monai-deploy-app-sdk/inputs/spleen_ct_tcia" -export HOLOSCAN_MODEL_PATH="$(pwd)/../monai-deploy-app-sdk/models/spleen_ct" -export HOLOSCAN_OUTPUT_PATH="$(pwd)/output_spleen" -export HOLOSCAN_LOG_LEVEL=INFO \ No newline at end of file +export HOLOSCAN_INPUT_PATH="/home/mqin/src//monai-deploy-app-sdk/inputs/spleen_ct_tcia" +export HOLOSCAN_MODEL_PATH="/home/mqin/src//monai-deploy-app-sdk/models/spleen_ct" +export HOLOSCAN_OUTPUT_PATH="./output_spleen" +export HOLOSCAN_LOG_LEVEL=DEBUG \ No newline at end of file diff --git a/platforms/aidoc/restful_app/requirements.txt b/platforms/aidoc/restful_app/requirements.txt index 4970ddd5..ec839c18 100644 --- a/platforms/aidoc/restful_app/requirements.txt +++ b/platforms/aidoc/restful_app/requirements.txt @@ -7,8 +7,10 @@ Pillow>=8.0.0 numpy-stl>=2.12.0 trimesh>=3.8.11 nibabel>=3.2.1 -torch>=1.12.0 -monai>=1.0.0 +torch>=2.4.1 +monai>=1.5.0 Flask==2.2.2 requests>=2.32 -Werkzeug==2.2.3 \ No newline at end of file +types-requests>=2.32.0 +Werkzeug==2.2.3 +pydantic>=2.9.0 From ed95fb16caca7d618802a314b601b2869068ac7e Mon Sep 17 00:00:00 2001 From: M Q Date: Thu, 18 Sep 2025 19:54:00 -0700 Subject: [PATCH 05/21] Proper json string and security fix Signed-off-by: M Q --- .../restful_app/ai_spleen_seg_app/app.py | 5 +++- platforms/aidoc/restful_app/app.py | 26 +++++++++++++++---- platforms/aidoc/restful_app/requirements.txt | 1 + 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py index 35965213..1bbc4185 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py @@ -117,8 +117,11 @@ def run(self, *args, **kwargs): "output_files": output_files, "error_message": None, "error_code": None, - "result": ai_results.model_dump_json(), } + + # Need to use pydantic function to dump to string and then reload to dict + # because for some reason direct dumping to dict did not work well + callback_msg_dict["result"] = json.loads(ai_results.model_dump_json()) self._status_callback(json.dumps(callback_msg_dict)) except Exception as e: diff --git a/platforms/aidoc/restful_app/app.py b/platforms/aidoc/restful_app/app.py index 11fb3f17..38164f2c 100644 --- a/platforms/aidoc/restful_app/app.py +++ 
b/platforms/aidoc/restful_app/app.py @@ -20,6 +20,7 @@ import requests from flask import Flask, jsonify, request +from flask_wtf.csrf import CSRFProtect # The MONAI Deploy application to be wrapped. # This can be changed to any other application in the repository. @@ -29,7 +30,13 @@ APP_CLASS_NAME = "AISpleenSegApp" # Flask application setup -restful_app = Flask(__name__) +app = Flask(__name__) +# It is recommended to use a securely generated random string for the secret key, +# and store it in an environment variable or a secure configuration file. +app.config["SECRET_KEY"] = os.environ.get("FLASK_SECRET_KEY", "a-secure-default-secret-key-for-dev") +csrf = CSRFProtect(app) + + logging.basicConfig(stream=sys.stdout, level=logging.INFO) # Global state to track processing status. A lock is used for thread safety. @@ -64,8 +71,16 @@ def app_status_callback(summary: str): logging.info(f"Sending final status callback to {callback_url}") # Here you could map the summary to the expected format of the callback. # For now, we'll just forward the summary. - requests.post(callback_url, data=summary, timeout=5) + response = requests.post(callback_url, data=summary, timeout=5) + response.raise_for_status() # for bad status codes (4xx or 5xx) logging.info("Sent final status callback.") + + except requests.exceptions.Timeout: + logging.error("The request timed out.") + except requests.exceptions.ConnectionError: + logging.error("A connection error occurred.") + except requests.exceptions.RequestException as e: + logging.error(f"An unexpected error occurred: {e}") except Exception as e: logging.error(f"Failed to send callback to {callback_url}: {e}") @@ -106,13 +121,14 @@ def app_status_callback(summary: str): logging.info("Processor is now IDLE.") -@restful_app.route("/status", methods=["GET"]) +@app.route("/status", methods=["GET"]) def status(): """Endpoint to check the current processing status.""" return jsonify({"status": get_processing_status()}) -@restful_app.route("/process", methods=["POST"]) +@app.route("/process", methods=["POST"]) +@csrf.exempt def process(): """Endpoint to start a new processing job.""" if get_processing_status() == "BUSY": @@ -151,4 +167,4 @@ def process(): args = parser.parse_args() host = args.host or os.environ.get("FLASK_HOST", "0.0.0.0") port = args.port or int(os.environ.get("FLASK_PORT", 5000)) - restful_app.run(host=host, port=port) + app.run(host=host, port=port) diff --git a/platforms/aidoc/restful_app/requirements.txt b/platforms/aidoc/restful_app/requirements.txt index ec839c18..315c1a7c 100644 --- a/platforms/aidoc/restful_app/requirements.txt +++ b/platforms/aidoc/restful_app/requirements.txt @@ -14,3 +14,4 @@ requests>=2.32 types-requests>=2.32.0 Werkzeug==2.2.3 pydantic>=2.9.0 +Flask-WTF>=1.0 From aee7695788e4cf3b00d23cdd571994d0294134db Mon Sep 17 00:00:00 2001 From: M Q Date: Thu, 18 Sep 2025 20:49:13 -0700 Subject: [PATCH 06/21] Fix complaints Signed-off-by: M Q --- platforms/aidoc/env_settings.sh | 5 +++++ platforms/aidoc/restful_app/ai_spleen_seg_app/app.py | 2 +- platforms/aidoc/restful_app/env_settings_repo.sh | 5 ----- 3 files changed, 6 insertions(+), 6 deletions(-) create mode 100755 platforms/aidoc/env_settings.sh delete mode 100755 platforms/aidoc/restful_app/env_settings_repo.sh diff --git a/platforms/aidoc/env_settings.sh b/platforms/aidoc/env_settings.sh new file mode 100755 index 00000000..24f5f44c --- /dev/null +++ b/platforms/aidoc/env_settings.sh @@ -0,0 +1,5 @@ +#!/bin/bash +export HOLOSCAN_MODEL_PATH=models +export 
HOLOSCAN_INPUT_PATH=dcm +export HOLOSCAN_OUTPUT_PATH=output_restful_app +export HOLOSCAN_LOG_LEVEL=INFO diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py index 1bbc4185..e5a5977a 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/app.py @@ -78,7 +78,7 @@ def _get_files_in_folder(self, folder_path: Union[str, Path]) -> List[str]: List[str]: A list of absolute paths to the files in the folder. """ if not os.path.isdir(folder_path): - self._logger.warning(f"Output folder '{folder_path}' not found, returning empty file list.") + self._logger.warning(f"Output folder {folder_path!r} not found, returning empty file list.") return [] file_paths = [] diff --git a/platforms/aidoc/restful_app/env_settings_repo.sh b/platforms/aidoc/restful_app/env_settings_repo.sh deleted file mode 100755 index a1aa932c..00000000 --- a/platforms/aidoc/restful_app/env_settings_repo.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -export HOLOSCAN_INPUT_PATH="/home/mqin/src//monai-deploy-app-sdk/inputs/spleen_ct_tcia" -export HOLOSCAN_MODEL_PATH="/home/mqin/src//monai-deploy-app-sdk/models/spleen_ct" -export HOLOSCAN_OUTPUT_PATH="./output_spleen" -export HOLOSCAN_LOG_LEVEL=DEBUG \ No newline at end of file From 2967b912dc82b6e1113e0c8fd7416c723967cf3a Mon Sep 17 00:00:00 2001 From: M Q Date: Thu, 18 Sep 2025 22:23:06 -0700 Subject: [PATCH 07/21] Updated readme Signed-off-by: M Q --- platforms/aidoc/README.md | 123 ++++++++++++++++++++++++++++++++++---- 1 file changed, 111 insertions(+), 12 deletions(-) diff --git a/platforms/aidoc/README.md b/platforms/aidoc/README.md index 8e44359e..e0a058bf 100644 --- a/platforms/aidoc/README.md +++ b/platforms/aidoc/README.md @@ -1,16 +1,100 @@ -# RESTful Wrapper Application for MONAI Deploy +# Creating REST Service with MONAI Deploy Application -This application provides a RESTful web interface to run MONAI Deploy applications. +This application provides an example of how to make a MONAI Deploy app run as a REST service on [Aidoc](https://www.aidoc.com/) platform. It is compliant with its [third party integration API](https://ai-partner-sdk.aidoc-cloud.com/prod/api/third-parties/doc/#), and the results [callback message schema](https://ai-partner-sdk.aidoc-cloud.com/prod/api/aidoc-callback/doc/#). -It allows you to start a processing job, check the status, and receive a callback when the job is complete. +This example uses a subset of the callback message attributes, covering only the required ones as well as some common attributes. For the full message definition, please contact Aidoc directly. -As it stands now, the callback message content is stubbed/generated in the wrapper app, and this will change to the design -where the wrapper app will pass a static callback function to the MONAI Deploy app which will have a reporter operator -that gathers the operations and domain specific info in the app's pipeline and then reports back the content via -this callback. The wrapper app will then have a mapping function to transform the reported data to that expected by -the external callback endpoint. +## High Level Design -Also, the whole Restful application can be packaged into a container image using MONAI Deploy app packager, but not doner here. +The high-level design of this REST service involves a few key components: + +1. 
**MONAI Deploy Application**: The core AI logic is encapsulated in a standard MONAI Deploy application (e.g., `AISpleenSegApp`), which is built and tested as a regular containerized workload. +2. **RESTful Service**: A lightweight RESTful application, built using Flask, acts as the front-end. It exposes endpoints to start and check the status of a processing job. +3. **Request Handling**: + - When the RESTful service receives a request to process data, it handles only one request at a time, as per the API specification. + - It creates an instance of the MONAI Deploy application. + - It sets the necessary environment variables for the input and output folders. + - Crucially, it delegates the execution of the MONAI Deploy application to a separate background thread to avoid blocking the web server. +4. **Callback Mechanism**: + - The callback message, which includes the AI results and a list of output files, is generated within the MONAI Deploy application at the end of its run. + - This message is then passed to a callback function that was provided by the REST service during the creation of the MONAI Deploy app instance. + - The REST service, upon receiving the callback, is then responsible for making the final `POST` request to the external callback endpoint specified by the original caller. + +This design separates the core AI application from the web-serving logic, allowing each to be developed and tested independently. + +## Diagrams + +### Component Diagram + +This diagram shows the static components of the system and their relationships using the C4 model. + +```mermaid +C4Component + title Component Diagram for MONAI Deploy REST Service + + Person(client, "External Client", "e.g., Aidoc Platform") + + Container_Boundary(rest_service_container, "RESTful Service") { + Component(flask, "Flask App", "Python, Flask", "Handles HTTP requests, manages processing threads, and sends callbacks.") + } + + Container_Boundary(monai_app_container, "MONAI Deploy Application") { + Component(monai_app, "AISpleenSegApp", "Python, MONAI Deploy SDK", "Orchestrates the AI inference pipeline and prepares the result message.") + Component(operators, "MONAI Deploy Operators", "Python, MONAI Deploy SDK", "Perform tasks like data loading, inference, and writing results.") + } + + System_Ext(fs, "Filesystem", "Stores input/output data.") + + Rel(client, flask, "1. Sends processing request", "JSON/HTTPS") + Rel(flask, client, "2. Responds 202 Accepted") + Rel(flask, monai_app, "3. Instantiates & runs in background thread") + Rel(monai_app, operators, "4. Uses operators to process data") + Rel(monai_app, fs, "5. Reads from & Writes to") + Rel(monai_app, flask, "6. Invokes callback on completion") + Rel(flask, client, "7. Sends final results", "JSON/HTTPS") +``` + +### Sequence Diagram + +This diagram illustrates the sequence of interactions for a processing job, including status checks. 
+ +```mermaid +sequenceDiagram + actor Client + participant RESTful Service + participant "MONAI Deploy App Thread" as AppThread + participant AISpleenSegApp + + Client->>+RESTful Service: POST /process (payload) + RESTful Service-->>-Client: HTTP 202 Accepted + RESTful Service->>+AppThread: Spawn thread(run_processing) + + opt While processing is busy + Client->>+RESTful Service: POST /process (payload) + RESTful Service-->>-Client: HTTP 409 Conflict + + Client->>+RESTful Service: GET /status + RESTful Service-->>-Client: HTTP 200 OK ("status": "BUSY") + end + + AppThread->>+AISpleenSegApp: Create instance(status_callback) + AppThread->>AISpleenSegApp: run() + Note over AISpleenSegApp: Executes processing pipeline... + AISpleenSegApp->>AISpleenSegApp: Formats success message + AISpleenSegApp->>AppThread: status_callback (message) + + AISpleenSegApp-->>AppThread: run() completes successfully + deactivate AISpleenSegApp + AppThread->>AppThread: Formats final message + AppThread->>+Client: POST callback_url (Final Results) + Client-->>-AppThread: HTTP 200 OK + + Note over RESTful Service: Processing status set to IDLE. + deactivate AppThread + + Client->>+RESTful Service: GET /status + RESTful Service-->>-Client: HTTP 200 OK ("status": "IDLE") +``` ## How to Run @@ -131,10 +215,25 @@ When processing is complete, the application will send a `POST` request to the ` ```json { "run_success": true, - "result": "Processing completed successfully.", - "output_files": ["test.json", "seg.com"], + "output_files": ["output_spleen/1.2.826.0.1.3680043.10.511.3.13787585732573161684951883631909444.dcm", "output_spleen/stl/spleen.stl"], "error_message": null, - "error_code": null + "error_code": null, + "result": { + "aggregated_results": { + "name": "Spleen Segmentation", + "algorithm_class": ["Measurement"] + }, + "detailed_results":{ + "Spleen Segmentation": { + "detection": null, + "measurement": { + "measurements_text": "Spleen segmentation completed successfully.", "key_slice_instance_uid": null, + "key_measurement": null + }, + "classification": null + } + } + } } ``` From c228ce075cc593f3075ed5ff16d39db87dc5940d Mon Sep 17 00:00:00 2001 From: M Q Date: Thu, 18 Sep 2025 22:47:36 -0700 Subject: [PATCH 08/21] Update per review comments Signed-off-by: M Q --- platforms/aidoc/README.md | 6 +++--- .../aidoc/restful_app/ai_spleen_seg_app/requirements.txt | 4 ++-- platforms/aidoc/restful_app/app.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/platforms/aidoc/README.md b/platforms/aidoc/README.md index e0a058bf..920ec2b0 100644 --- a/platforms/aidoc/README.md +++ b/platforms/aidoc/README.md @@ -119,7 +119,7 @@ Change working directory to the same level as this README. gdown https://drive.google.com/uc?id=1IwWMpbo2fd38fKIqeIdL8SKTGvkn31tK ``` - Unzip the file to local folders. If deviating from the path noted below, please adjuest the env var values + Unzip the file to local folders. If deviating from the path noted below, please adjust the env var values ``` unzip -o "ai_spleen_seg_bundle_data.zip" @@ -144,7 +144,7 @@ Change working directory to the same level as this README. ## Test API Endpoints A simplest test client is provided, which makes call to the endpoint, as well as providing -a callback endpoint to receives message content at the specidied port. +a callback endpoint to receives message content at the specified port. Open another console window and change directory to the same as this file. 
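For local experiments, any small HTTP server can stand in for the callback target; the sketch below is one way to inspect what the service posts (the port and route here are arbitrary choices, not anything defined in the repository):

```python
# Minimal callback receiver for local testing; assumes Flask is installed.
from flask import Flask, jsonify, request

receiver = Flask(__name__)

@receiver.route("/callback", methods=["POST"])
def callback():
    # The wrapper app forwards the final status message as a JSON body.
    print(request.get_json(force=True))
    return jsonify({"received": True})

if __name__ == "__main__":
    receiver.run(host="127.0.0.1", port=8008)
```

The environment variables the script relies on: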
@@ -241,7 +241,7 @@ Or in case of an error: ```json { - "run_success": False, + "run_success": false, "error_message": "E.g., Model network is not load and model file not found.", "error_code": 500 } diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt b/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt index 210906dc..fc5453a3 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt @@ -7,5 +7,5 @@ Pillow>=8.0.0 numpy-stl>=2.12.0 trimesh>=3.8.11 nibabel>=3.2.1 -torch>=1.12.0 -monai>=1.0.0 +torch>=2.4.1 +monai>=1.5.0 diff --git a/platforms/aidoc/restful_app/app.py b/platforms/aidoc/restful_app/app.py index 38164f2c..a6672d28 100644 --- a/platforms/aidoc/restful_app/app.py +++ b/platforms/aidoc/restful_app/app.py @@ -165,6 +165,6 @@ def process(): help="Port to listen on. Defaults to env var FLASK_PORT or 5000.", ) args = parser.parse_args() - host = args.host or os.environ.get("FLASK_HOST", "0.0.0.0") - port = args.port or int(os.environ.get("FLASK_PORT", 5000)) + host = args.host + port = args.port app.run(host=host, port=port) From e58806874c5c320a65314304694906e29a3f5d50 Mon Sep 17 00:00:00 2001 From: M Q Date: Thu, 18 Sep 2025 22:53:08 -0700 Subject: [PATCH 09/21] More update Signed-off-by: M Q --- platforms/aidoc/README.md | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/platforms/aidoc/README.md b/platforms/aidoc/README.md index 920ec2b0..44c92c26 100644 --- a/platforms/aidoc/README.md +++ b/platforms/aidoc/README.md @@ -9,9 +9,9 @@ This example uses a subset of the callback message attributes, covering only the The high-level design of this REST service involves a few key components: 1. **MONAI Deploy Application**: The core AI logic is encapsulated in a standard MONAI Deploy application (e.g., `AISpleenSegApp`), which is built and tested as a regular containerized workload. -2. **RESTful Service**: A lightweight RESTful application, built using Flask, acts as the front-end. It exposes endpoints to start and check the status of a processing job. +2. **REST Service**: A lightweight REST application, built using Flask, acts as the front-end. It exposes endpoints to start and check the status of a processing job. 3. **Request Handling**: - - When the RESTful service receives a request to process data, it handles only one request at a time, as per the API specification. + - When the REST service receives a request to process data, it handles only one request at a time, as per the API specification. - It creates an instance of the MONAI Deploy application. - It sets the necessary environment variables for the input and output folders. - Crucially, it delegates the execution of the MONAI Deploy application to a separate background thread to avoid blocking the web server. 
@@ -34,7 +34,7 @@ C4Component Person(client, "External Client", "e.g., Aidoc Platform") - Container_Boundary(rest_service_container, "RESTful Service") { + Container_Boundary(rest_service_container, "REST Service") { Component(flask, "Flask App", "Python, Flask", "Handles HTTP requests, manages processing threads, and sends callbacks.") } @@ -61,20 +61,20 @@ This diagram illustrates the sequence of interactions for a processing job, incl ```mermaid sequenceDiagram actor Client - participant RESTful Service + participant REST Service participant "MONAI Deploy App Thread" as AppThread participant AISpleenSegApp - Client->>+RESTful Service: POST /process (payload) - RESTful Service-->>-Client: HTTP 202 Accepted - RESTful Service->>+AppThread: Spawn thread(run_processing) + Client->>+REST Service: POST /process (payload) + REST Service-->>-Client: HTTP 202 Accepted + REST Service->>+AppThread: Spawn thread(run_processing) opt While processing is busy - Client->>+RESTful Service: POST /process (payload) - RESTful Service-->>-Client: HTTP 409 Conflict + Client->>+REST Service: POST /process (payload) + REST Service-->>-Client: HTTP 409 Conflict - Client->>+RESTful Service: GET /status - RESTful Service-->>-Client: HTTP 200 OK ("status": "BUSY") + Client->>+REST Service: GET /status + REST Service-->>-Client: HTTP 200 OK ("status": "BUSY") end AppThread->>+AISpleenSegApp: Create instance(status_callback) @@ -89,11 +89,11 @@ sequenceDiagram AppThread->>+Client: POST callback_url (Final Results) Client-->>-AppThread: HTTP 200 OK - Note over RESTful Service: Processing status set to IDLE. + Note over REST Service: Processing status set to IDLE. deactivate AppThread - Client->>+RESTful Service: GET /status - RESTful Service-->>-Client: HTTP 200 OK ("status": "IDLE") + Client->>+REST Service: GET /status + REST Service-->>-Client: HTTP 200 OK ("status": "IDLE") ``` ## How to Run @@ -149,7 +149,7 @@ a callback endpoint to receives message content at the specified port. Open another console window and change directory to the same as this file. Set the environment vars so that the test script can get the input DCM and write the callback contents. -Also, once the Restful app completes each processing, the Spleen Seg app's output will also be saved in +Also, once the REST app completes each processing, the Spleen Seg app's output will also be saved in the output folder specified below (the script passes the output folder via the Rest API). ``` From e1a73f48a4120234e7ba790f5d3166b13976f07f Mon Sep 17 00:00:00 2001 From: M Q Date: Thu, 18 Sep 2025 23:05:47 -0700 Subject: [PATCH 10/21] Set header Signed-off-by: M Q --- platforms/aidoc/restful_app/app.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/platforms/aidoc/restful_app/app.py b/platforms/aidoc/restful_app/app.py index a6672d28..d17e7bbc 100644 --- a/platforms/aidoc/restful_app/app.py +++ b/platforms/aidoc/restful_app/app.py @@ -71,7 +71,8 @@ def app_status_callback(summary: str): logging.info(f"Sending final status callback to {callback_url}") # Here you could map the summary to the expected format of the callback. # For now, we'll just forward the summary. 
- response = requests.post(callback_url, data=summary, timeout=5) + headers = {"Content-Type": "application/json"} + response = requests.post(callback_url, data=summary, headers=headers, timeout=5) response.raise_for_status() # for bad status codes (4xx or 5xx) logging.info("Sent final status callback.") From a92767aaec14496c59d19a551b014e1f40cdb9e1 Mon Sep 17 00:00:00 2001 From: M Q Date: Fri, 19 Sep 2025 19:39:49 -0700 Subject: [PATCH 11/21] Added packaging and running MAP of the REST service Signed-off-by: M Q --- platforms/aidoc/README.md | 94 ++++++++++++++++++++++++++++----------- 1 file changed, 68 insertions(+), 26 deletions(-) diff --git a/platforms/aidoc/README.md b/platforms/aidoc/README.md index 44c92c26..ae11020d 100644 --- a/platforms/aidoc/README.md +++ b/platforms/aidoc/README.md @@ -1,6 +1,6 @@ # Creating REST Service with MONAI Deploy Application -This application provides an example of how to make a MONAI Deploy app run as a REST service on [Aidoc](https://www.aidoc.com/) platform. It is compliant with its [third party integration API](https://ai-partner-sdk.aidoc-cloud.com/prod/api/third-parties/doc/#), and the results [callback message schema](https://ai-partner-sdk.aidoc-cloud.com/prod/api/aidoc-callback/doc/#). +This application provides an example of how to make a MONAI Deploy app run as a REST service on the [Aidoc](https://www.aidoc.com/) platform. It is compliant with its [third party integration API](https://ai-partner-sdk.aidoc-cloud.com/prod/api/third-parties/doc/#), and the results [callback message schema](https://ai-partner-sdk.aidoc-cloud.com/prod/api/aidoc-callback/doc/#). This example uses a subset of the callback message attributes, covering only the required ones as well as some common attributes. For the full message definition, please contact Aidoc directly. @@ -8,12 +8,12 @@ This example uses a subset of the callback message attributes, covering only the The high-level design of this REST service involves a few key components: -1. **MONAI Deploy Application**: The core AI logic is encapsulated in a standard MONAI Deploy application (e.g., `AISpleenSegApp`), which is built and tested as a regular containerized workload. +1. **MONAI Deploy Application**: The core AI logic is encapsulated in a standard MONAI Deploy application (e.g., `AISpleenSegApp`), which is built and tested as a regular containerized workload. The application is responsible for generating the inference results using Pydantic classes that are based on Aidoc's callback message schema. It then reports these results as a JSON string via a callback function provided during its construction. 2. **REST Service**: A lightweight REST application, built using Flask, acts as the front-end. It exposes endpoints to start and check the status of a processing job. 3. **Request Handling**: - When the REST service receives a request to process data, it handles only one request at a time, as per the API specification. - It creates an instance of the MONAI Deploy application. - - It sets the necessary environment variables for the input and output folders. + - It sets the necessary environment variables for the input and output folders for the processing execution. - Crucially, it delegates the execution of the MONAI Deploy application to a separate background thread to avoid blocking the web server. 4. **Callback Mechanism**: - The callback message, which includes the AI results and a list of output files, is generated within the MONAI Deploy application at the end of its run. 
@@ -22,11 +22,9 @@ The high-level design of this REST service involves a few key components: This design separates the core AI application from the web-serving logic, allowing each to be developed and tested independently. -## Diagrams - ### Component Diagram -This diagram shows the static components of the system and their relationships using the C4 model. +This diagram shows the static components of the system and their relationships. ```mermaid C4Component @@ -96,9 +94,9 @@ sequenceDiagram REST Service-->>-Client: HTTP 200 OK ("status": "IDLE") ``` -## How to Run +## How to Run in Development Environment -Change working directory to the same level as this README. +Change your working directory to the one containing this README file and the `restful_app` folder. 1. **Install Dependencies** @@ -107,27 +105,25 @@ Change working directory to the same level as this README. ```bash pip install -r restful_app/requirements.txt ``` -2. **Download Test Data and Set Env Vars** - The model and test DICOM series are shared on Google Drive requiring first gaining access permission, and - the zip file is [here](https://drive.google.com/uc?id=1IwWMpbo2fd38fKIqeIdL8SKTGvkn31tK). +2. **Download Test Data and Set Environment Variables** + The model and test DICOM series are shared on Google Drive, which requires gaining access permission first. The zip file is available [here](https://drive.google.com/uc?id=1IwWMpbo2fd38fKIqeIdL8SKTGvkn31tK). - Please make a request so that it can be shared to specific Gmail account. + Please make a request so that it can be shared with a specific Gmail account. - `gdown` may also work. + `gdown` may also work: ``` pip install gdown gdown https://drive.google.com/uc?id=1IwWMpbo2fd38fKIqeIdL8SKTGvkn31tK ``` - Unzip the file to local folders. If deviating from the path noted below, please adjust the env var values + Unzip the file to local folders. If deviating from the path noted below, please adjust the env var values. ``` unzip -o "ai_spleen_seg_bundle_data.zip" rm -rf models && mkdir -p models/model && mv model.ts models/model && ls models/model ``` - Set the environment vars so that the model can be found by the Spleen Seg app. Also, - the settings are consolidated in the `env_settings.sh`. + Set the environment variables so that the model can be found by the Spleen Seg app. These settings are also consolidated in the `env_settings.sh` script. ``` export HOLOSCAN_MODEL_PATH=models @@ -143,18 +139,15 @@ Change working directory to the same level as this README. ## Test API Endpoints -A simplest test client is provided, which makes call to the endpoint, as well as providing -a callback endpoint to receives message content at the specified port. +A simple test client is provided, which makes calls to the endpoint, as well as providing a callback endpoint to receive message content at the specified port. Open another console window and change directory to the same as this file. -Set the environment vars so that the test script can get the input DCM and write the callback contents. -Also, once the REST app completes each processing, the Spleen Seg app's output will also be saved in -the output folder specified below (the script passes the output folder via the Rest API). +Set the environment vars so that the test script can get the input DCM and write the callback contents. Also, once the REST app completes each processing, the Spleen Seg app's output will also be saved in the output folder specified below (the script passes the output folder via the REST API). 
```
export HOLOSCAN_INPUT_PATH=dcm
-export HOLOSCAN_OUTPUT_PATH=output
+export HOLOSCAN_OUTPUT_PATH=output_restful_app
```

Run the test script, and examine its console output.

```
source test_endpoints.sh
```

-Once the script completes, examine the `output` folder, which should contain the following (dcm file
-name will be different)
+Once the script completes, examine the output folder specified by `HOLOSCAN_OUTPUT_PATH`, which should contain the following (the DICOM file name will be different):

```
output
├── 1.2.826.0.1.3680043.10.511.3.22611096892439837402906545708809852.dcm
└── stl
    └── spleen.stl
```

-The script can run multiple times, or modified to loop with different output folder setting.
+The script can be run multiple times or modified to loop with different output folder settings.

### Check Status

- **URL**: `/status`
- **Method**: `GET`
- **Description**: Checks the current status of the processor.
- **Success Response**:
  - **Code**: 200 OK
  - **Content**: `{ "status": "IDLE" }` or `{ "status": "BUSY" }`

### Process Data

- **URL**: `/process`
- **Method**: `POST`
- **Description**: Starts a new processing job.
- **Body**:

  ```json
  {
    "input_folder": "/path/to/your/input/data",
    "output_folder": "/path/to/your/output/folder",
    "callback_url": "http://your-service.com/callback"
  }
  ```

- **Success Response**:
  - **Code**: 202 ACCEPTED
  - **Content**: `{ "message": "Processing started." }`
- **Error Response**:
  - **Code**: 409 CONFLICT
  - **Content**: `{ "error": "Processor is busy." }`
  - **Code**: 400 BAD REQUEST
  - **Content**: `{ "error": "Missing required fields." }`

### Callback

-When processing is complete, the application will send a `POST` request to the `callback_url` provided in the process request. The body of the callback will be:
+When processing is complete, the application will send a `POST` request to the `callback_url` provided in the process request. The body of the callback will be similar to this:

```json
{
  "run_success": true,
  "result": "Processing completed successfully.",
  "output_files": ["test.json", "seg.com"],
  "error_message": null,
  "error_code": null
}
```

Or in case of an error:

```json
{
  "run_success": false,
  "error_message": "E.g., Model network is not loaded and model file not found.",
  "error_code": 500
}
```
+
+Please note: The test script uses a simple `nc` command to emulate the callback service. This lightweight approach may sometimes lead to timeout errors on the client side (the REST service), preventing the test script from capturing the callback message. If this occurs, running the script again is a known workaround.
+
+## Packaging and Testing the REST Service Container
+
+### Packaging the Application
+
+To package the REST service application into a MONAI App Package (MAP) container, you can use the MONAI Deploy CLI. The following is an example command, run with the current working directory as the parent of `restful_app`:
+
+```bash
+monai-deploy package restful_app -m models/spleen_ct -c restful_app/app.yaml -t monai-rest:1.0 --platform x86_64 -l DEBUG
+```
+
+This command packages the `restful_app` directory, includes the specified model, uses `app.yaml` for configuration, and tags the resulting Docker image as `monai-rest-x64-workstation-dgpu-linux-amd64:1.0`, which includes the target platform name.
+
+Note that the model folder should contain only the model file (e.g., `model.ts`) or subfolders that each contain only a model file.
+
+
+### Running the MAP Container
+
+While you can run MAPs with the `monai-deploy run` command, it currently has limitations regarding the mapping of arbitrary volumes and passing extra environment variables that are necessary for this REST service. Therefore, it is required to use the `docker run` command directly (or a platform-specific equivalent) to have full control over the container's execution environment.
+
+```bash
+docker run --gpus=all --network host --name my_monai_rest_service -t --rm \
+-v :/var/holoscan/input/ \
+-v :/var/holoscan/output/ \
+-v :/var/holoscan/ \
+-e FLASK_HOST="0.0.0.0" \
+-e FLASK_PORT="5000" \
+--entrypoint /bin/bash monai-rest-x64-workstation-dgpu-linux-amd64:1.0 -c "python3 -u /opt/holoscan/app/"
+```
+
+**Command parameters**
+
+- `--gpus=all`: Exposes all available host GPUs to the container, which is necessary for CUDA-based inference. A specific CUDA device ID can also be used. 
+- `--network host`: The container shares the host's network stack, making the Flask server directly accessible on the host's IP address and port (e.g., `http://localhost:5000`). +- `--name my_monai_rest_service`: Assigns a convenient name to the running container. +- `-t --rm`: Allocates a pseudo-terminal and automatically removes the container when it stops. +- `-v :/var/holoscan/input/`: Mounts a host directory into the container as `/var/holoscan/input/`. This allows the REST service to access input files using an internal container path. For example, the inference input (e.g., a DICOM study's instance files) should be staged in a subfolder on the host, e.g. `my_test_study`, and the client request message must use the corresponding internal container path (e.g., `/var/holoscan/input/my_test_study`). +- `-v :/var/holoscan/output/`: Mounts a host directory into the container as `/var/holoscan/output/`, allowing the REST service to save the inference result files. +- `-e FLASK_HOST="0.0.0.0"` and `-e FLASK_PORT="5000"`: These environment variables configure the Flask-based REST application to be accessible from outside the container on the specified port. +- `--entrypoint /bin/bash ... -c "python3 -u /opt/holoscan/app/"`: This overrides the default entrypoint of the MAP container. Instead of running the MONAI Deploy application directly, it starts a bash shell that executes the command to run the Flask application, effectively starting the REST service. + +The simple test client, `test_endpoints.sh`, can be used to test the REST service container. It requires a couple of simple changes to use the container's internal folder paths for I/O. For example: + +```bash +# Get the absolute path to the input and output directories +INPUT_DIR="/var/holoscan/input/spleen_ct_tcia" +OUTPUT_DIR="/var/holoscan/output/output_spleen_rest" +``` \ No newline at end of file From b4db5ea4336e09c4ec66b155e0710b84b71fb534 Mon Sep 17 00:00:00 2001 From: M Q Date: Fri, 19 Sep 2025 19:55:25 -0700 Subject: [PATCH 12/21] Add app.yaml that has missed the checkin Signed-off-by: M Q --- platforms/aidoc/restful_app/app.yaml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 platforms/aidoc/restful_app/app.yaml diff --git a/platforms/aidoc/restful_app/app.yaml b/platforms/aidoc/restful_app/app.yaml new file mode 100644 index 00000000..a78e7ab2 --- /dev/null +++ b/platforms/aidoc/restful_app/app.yaml @@ -0,0 +1,27 @@ +%YAML 1.2 +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 MONAI. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +application: + title: MONAI Deploy App Package - Spleen Seg Inference REST Service + version: 1.0 + inputFormats: ["file"] + outputFormats: ["file"] + +resources: + cpu: 1 + gpu: 1 + memory: 1Gi + gpuMemory: 7Gi From 0bc3bf5c8fc87eb67dc45c9c2f624099ea892dec Mon Sep 17 00:00:00 2001 From: M Q Date: Thu, 25 Sep 2025 14:47:33 -0700 Subject: [PATCH 13/21] pin monai<=1.5.0 for now as 1.5.1 has breaking changes Signed-off-by: M Q --- platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt | 2 +- platforms/aidoc/restful_app/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt b/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt index fc5453a3..b1ea86b5 100644 --- a/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt +++ b/platforms/aidoc/restful_app/ai_spleen_seg_app/requirements.txt @@ -8,4 +8,4 @@ numpy-stl>=2.12.0 trimesh>=3.8.11 nibabel>=3.2.1 torch>=2.4.1 -monai>=1.5.0 +monai>=1.4.0,<=1.5.0 diff --git a/platforms/aidoc/restful_app/requirements.txt b/platforms/aidoc/restful_app/requirements.txt index 315c1a7c..0fcbdc17 100644 --- a/platforms/aidoc/restful_app/requirements.txt +++ b/platforms/aidoc/restful_app/requirements.txt @@ -8,7 +8,7 @@ numpy-stl>=2.12.0 trimesh>=3.8.11 nibabel>=3.2.1 torch>=2.4.1 -monai>=1.5.0 +monai>=1.4.0,<=1.5.0 Flask==2.2.2 requests>=2.32 types-requests>=2.32.0 From 2e4f3bc8c482abaeb8481377a57d5c9d42318eab Mon Sep 17 00:00:00 2001 From: Elan Somasundaram Date: Wed, 1 Oct 2025 19:34:32 -0400 Subject: [PATCH 14/21] Add CCHMC nnUNet fifteen checkpoint application example Signed-off-by: Elan Somasundaram Signed-off-by: chezhia --- .../cchmc_nnunet_fifteen_ckpt_app/LICENSE | 201 ++++ .../cchmc_nnunet_fifteen_ckpt_app/README.md | 200 ++++ .../convert_nnunet_ckpts.py | 104 ++ .../development_notes.md | 62 ++ .../my_app/__init__.py | 29 + .../my_app/__main__.py | 26 + .../my_app/app.py | 257 +++++ .../my_app/app.yaml | 34 + .../my_app/dicom_sc_writer_operator.py | 253 +++++ .../my_app/dicom_series_selector_operator.py | 629 +++++++++++ .../my_app/nnunet_bundle.py | 995 ++++++++++++++++++ .../my_app/nnunet_seg_operator.py | 426 ++++++++ .../my_app/post_transforms.py | 390 +++++++ .../my_app/requirements.txt | 37 + 14 files changed, 3643 insertions(+) create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/LICENSE create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/convert_nnunet_ckpts.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/development_notes.md create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__init__.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__main__.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.yaml create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_sc_writer_operator.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_series_selector_operator.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/post_transforms.py create mode 100644 examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt diff --git 
a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/LICENSE b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/LICENSE new file mode 100644 index 00000000..753842b6 --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md
new file mode 100644
index 00000000..00263d95
--- /dev/null
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md
@@ -0,0 +1,200 @@
+# MONAI Application Package (MAP) for a sample nnUNet model
+
+This README describes the process of converting the [CCHMC Pediatric Airway Segmentation nnUnet model] into a MONAI Application Package (MAP).
+
+## Convert nnUNet checkpoints to MONAI-compatible models
+
+The `convert_nnunet_ckpts.py` script simplifies the process of converting nnUNet model checkpoints to MONAI bundle format. This conversion is necessary to use nnUNet models within MONAI applications and the MONAI Deploy ecosystem.
+
+## Example model checkpoints
+
+Sample nnUNet model checkpoints for UTE MRI airway segmentation in NICU patients are available here:
+
+https://drive.google.com/drive/folders/1lRs-IoLR47M_WFyZmuCaROJULtyPdkLm?usp=drive_link
+
+### Prerequisites
+
+Before running the conversion script, ensure that:
+1. You have trained nnUNet models available
+2. The nnUNet environment variables are set, or you can provide them as arguments
+3. A Python environment with the required dependencies is set up (my_app/requirements.txt)
+
+### Basic Usage
+
+The script can be executed with the following command:
+
+```bash
+python convert_nnunet_ckpts.py --dataset_name_or_id DATASET_ID --MAP_root OUTPUT_DIR --nnUNet_results RESULTS_PATH
+```
+
+The RESULTS_PATH should contain the "inference_information.json" file that nnunetv2 creates automatically; the conversion relies on it to determine the best model configuration and converts those checkpoints for the MAP.
+
+### Command-line Arguments
+
+| Argument | Description | Required | Default |
+|----------|-------------|----------|---------|
+| `--dataset_name_or_id` | Name or ID of the nnUNet dataset to convert | Yes | N/A |
+| `--MAP_root` | Output directory for the converted MONAI bundle | No | Current directory |
+| `--nnUNet_raw` | Path to nnUNet raw data directory | No | Uses environment variable if set |
+| `--nnUNet_preprocessed` | Path to nnUNet preprocessed data directory | No | Uses environment variable if set |
+| `--nnUNet_results` | Path to nnUNet results directory with trained models | No | Uses environment variable if set |
+
+#### Example
+
+Convert dataset with ID 4 to the models directory:
+
+```bash
+python convert_nnunet_ckpts.py \
+    --dataset_name_or_id 4 \
+    --MAP_root "." 
\
+    --nnUNet_results "/path/to/nnunet/models"
+```
+
+#### Output Structure
+
+The conversion creates a MONAI bundle with the following structure in the specified `MAP_root` directory:
+
+```
+MAP_root/
+└── models/
+    ├── jsonpkls/
+    │   ├── dataset.json          # Dataset configuration
+    │   ├── plans.json            # Model planning information
+    │   ├── postprocessing.pkl    # Optional postprocessing configuration
+    ├── 3d_fullres/               # Model configuration (if present)
+    │   ├── nnunet_checkpoint.pth
+    │   └── fold_X/               # Each fold's model weights
+    │       └── best_model.pt
+    ├── 3d_lowres/                # Model configuration (if present)
+    └── 3d_cascade_fullres/       # Model configuration (if present)
+```
+
+This bundle structure is compatible with MONAI inference tools and the MONAI Deploy application ecosystem.
+
+
+## Setting Up Environment
+Instructions regarding installation of MONAI Deploy App SDK and details of the necessary system requirements can be found on the MONAI Deploy App SDK [GitHub Repository](https://github.com/Project-MONAI/monai-deploy-app-sdk) and [docs](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/installing_app_sdk.html). Instructions on how to create a virtual environment and install other dependencies can be found in the MONAI Deploy App SDK docs under the Creating a Segmentation App Consuming a MONAI Bundle [example](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/tutorials/monai_bundle_app.html).
+
+Per MONAI, MONAI Deploy App SDK is required to be run in a Linux environment, specifically Ubuntu 22.04 on x86-64, as this is the only x86 platform that the underlying Holoscan SDK has been tested to support as of now. This project uses Poetry for dependency management, which simplifies setting up the environment with all required dependencies.
+
+### System Requirements
+- **Operating System:** Linux (Ubuntu 22.04 recommended)
+- **Architecture:** x86_64
+- **GPU:** NVIDIA GPU (recommended for inference)
+- **Python:** 3.10 or newer (project requires >=3.10,<3.13)
+
+
+
+## Executing Model Bundle Pythonically
+Prior to MAP building, the exported model bundle can be executed pythonically via the command line.
+
+Within the main directory of this downloaded repository, create a `.env` file. MONAI recommends the following `.env` structure and naming conventions:
+
+```env
+HOLOSCAN_INPUT_PATH=${PWD}/input
+HOLOSCAN_MODEL_PATH=${PWD}/models
+HOLOSCAN_OUTPUT_PATH=${PWD}/output
+```
+
+Load in the environment variables:
+
+```
+source .env
+```
+
+If it already exists, remove the directory specified by the `HOLOSCAN_OUTPUT_PATH` environment variable:
+
+```
+rm -rf $HOLOSCAN_OUTPUT_PATH
+```
+
+Execute the model bundle pythonically via the command line; the directory specified by the `HOLOSCAN_INPUT_PATH` environment variable should be created and populated with a DICOM series for testing by the user. The model bundle files should be placed in the `/models` folder to match the recommended `HOLOSCAN_MODEL_PATH` value. `HOLOSCAN_INPUT_PATH`, `HOLOSCAN_OUTPUT_PATH`, and `HOLOSCAN_MODEL_PATH` default values can be amended by updating the `.env` file appropriately.
+
+```
+python my_app -i "$HOLOSCAN_INPUT_PATH" -o "$HOLOSCAN_OUTPUT_PATH" -m "$HOLOSCAN_MODEL_PATH"
+```
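Equivalently, the run can be scripted in Python. The sketch below is an illustration only; it assumes the `input`, `output`, and `models` folders from the `.env` example above and a working directory containing `my_app`, and the `sys.path` insertion stands in for what `my_app/__init__.py` does when the folder is executed directly.

```python
import os
import sys

# assumed relative paths, matching the .env values above
os.environ["HOLOSCAN_INPUT_PATH"] = "input"    # DICOM series to test with
os.environ["HOLOSCAN_OUTPUT_PATH"] = "output"  # where results are written
os.environ["HOLOSCAN_MODEL_PATH"] = "models"   # converted nnUNet bundle

# make the app module importable when running from the repo root (hypothetical layout)
sys.path.insert(0, "my_app")

from app import UTEAirwayNNUnetApp  # defined in my_app/app.py

UTEAirwayNNUnetApp().run()
```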
+## Building the MAP
+It is recommended that the NVIDIA Clara Holoscan base image is pulled prior to building the MAP. If this base image is not pulled beforehand, it will be pulled automatically during the build process, which will increase the build time from around 1-2 minutes to around 10-15 minutes. Ensure the base image matches the Holoscan SDK version being used in your environment (e.g. if you are using Holoscan SDK v3.2.0, replace `${holoscan-version}` with `v3.2.0`).
+
+```
+docker pull nvcr.io/nvidia/clara-holoscan/holoscan:${holoscan-version}-dgpu
+```
+
+Execute the following command to build the MAP Docker image based on the supported NVIDIA Clara Holoscan base image. During MAP building, a Docker container based on the `moby/buildkit` Docker image will be spun up; this container (Docker BuildKit builder `holoscan_app_builder`) facilitates the MAP build.
+
+```
+monai-deploy package my_app -m $HOLOSCAN_MODEL_PATH -c my_app/app.yaml -t ${tag_prefix}:${image_version} --platform x86_64 -l DEBUG
+```
+
+As of August 2024, a new error may appear during the MAP build related to the Dockerfile, where `monai-deploy-app-sdk` v0 (which does not exist) is attempted to be installed:
+
+```bash
+Dockerfile:78
+--------------------
+  76 |
+  77 |     # Install MONAI Deploy from PyPI org
+  78 | >>> RUN pip install monai-deploy-app-sdk==0
+  79 |
+  80 |
+--------------------
+```
+
+If you encounter this error, you can specify the MONAI Deploy App SDK version via `--sdk-version` directly in the build command (`3.0.0`, for example). The base image for the MAP build can also be specified via `--base-image`:
+
+```
+monai-deploy package my_app -m $HOLOSCAN_MODEL_PATH -c my_app/app.yaml -t ${tag_prefix}:${image_version} --platform x86_64 --base-image ${base_image} --sdk-version ${version} -l DEBUG
+```
+
+If using Docker Desktop, the MAP should now appear in the "Images" tab as `${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version}`. You can also confirm MAP creation in the CLI by executing this command:
+
+```
+docker image ls | grep ${tag_prefix}
+```
+
+## Display and Extract MAP Contents
+There are a few commands that can be executed in the command line to view MAP contents.
+
+To display some basic MAP manifests, use the `show` command. The following command will run and subsequently remove a MAP Docker container; the `show` command will display information about the MAP-associated `app.json` and `pkg.json` files as command line outputs.
+
+```
+docker run --rm ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} show
+```
+
+MAP manifests and other contents can also be extracted into a specific host folder using the `extract` command.
+
+The host folder used to store the extracted MAP contents must be created by the host, not by Docker upon running the MAP as a container. This is most applicable when MAP contents are extracted more than once; the export folder must be deleted and recreated in this case.
+
+```
+rm -rf `pwd`/export && mkdir -p `pwd`/export
+```
+
+After creating the folder for export, executing the following command will run and subsequently remove a MAP Docker container.
+
+```
+docker run --rm -v `pwd`/export/:/var/run/holoscan/export/ ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} extract
+```
+
+The `extract` command will extract MAP contents to the `/export` folder, organized as follows:
+- `app` folder, which contains all the files present in `my_app`
+- `config` folder, which contains the MAP manifests (`app.json`, `pkg.json`, and `app.yaml`)
+- `models` folder, which contains the model bundle used to create the MAP
+
+## Executing MAP Locally via the MONAI Application Runner (MAR)
+The generated MAP can be tested locally using the MONAI Application Runner (MAR).
+
+First, clear the contents of the output directory:
+
+```
+rm -rf $HOLOSCAN_OUTPUT_PATH
+```
+
+Then, the MAP can be executed locally via the MAR command line utility; input and output directories must be specified:
+
+```
+monai-deploy run -i $HOLOSCAN_INPUT_PATH -o $HOLOSCAN_OUTPUT_PATH ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version}
+```
+
+## Scripts
+Several scripts have been compiled that quickly execute useful actions (such as local model execution, MAP building, etc.). Some scripts require the input of command line arguments; review the `scripts` folder for more details.
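Before reading the conversion script itself, note that the conversion it wraps reduces to a single function call. The following is a minimal sketch under stated assumptions: `nnUNet_results` must point at a results folder containing `inference_information.json`, and the dataset ID and paths shown are placeholders.

```python
import os

# local copy of the MONAI nnU-Net bundle utilities (see my_app/nnunet_bundle.py)
from my_app.nnunet_bundle import convert_best_nnunet_to_monai_bundle

os.environ["nnUNet_results"] = "/path/to/nnunet/models"  # trained nnUNet results (placeholder)

# convert dataset 4's best configuration(s) into a MONAI bundle under ./models
convert_best_nnunet_to_monai_bundle({"dataset_name_or_id": "4"}, ".")
```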
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/convert_nnunet_ckpts.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/convert_nnunet_ckpts.py
new file mode 100644
index 00000000..4f0d57f4
--- /dev/null
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/convert_nnunet_ckpts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Convert nnUNet checkpoints to MONAI bundle format.
+This script follows the logic in the conversion notebook but imports from the local my_app.nnunet_bundle.
+"""
+
+import argparse
+import os
+import sys
+
+# Add the current directory to the path to find the local module
+current_dir = os.path.dirname(os.path.abspath(__file__))
+if current_dir not in sys.path:
+    sys.path.insert(0, current_dir)
+
+# Try importing from the local my_app.nnunet_bundle first, falling back to MONAI
+try:
+    from my_app.nnunet_bundle import convert_best_nnunet_to_monai_bundle
+except ImportError:
+    # If the local import fails, try to find the module in alternate locations
+    try:
+        from monai.apps.nnunet_bundle import convert_best_nnunet_to_monai_bundle
+    except ImportError:
+        print(
+            "Error: Could not import convert_best_nnunet_to_monai_bundle from my_app.nnunet_bundle or monai.apps.nnunet_bundle"
+        )
+        print("Please ensure that nnunet_bundle.py is properly installed in your project.")
+        sys.exit(1)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Convert nnUNet checkpoints to MONAI bundle format.")
+    parser.add_argument(
+        "--dataset_name_or_id", type=str, required=True, help="The name or ID of the dataset to convert."
+    )
+    parser.add_argument(
+        "--MAP_root",
+        type=str,
+        default=os.getcwd(),
+        help="The root directory where the Medical Application Package (MAP) will be created. Defaults to current directory.",
+    )
+
+    parser.add_argument(
+        "--nnUNet_results",
+        type=str,
+        required=False,
+        default=None,
+        help="Path to nnUNet results directory with trained models.",
+    )
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+
+    # Create the nnUNet config dictionary
+    nnunet_config = {
+        "dataset_name_or_id": args.dataset_name_or_id,
+    }
+
+    # Create the MAP root directory
+    map_root = args.MAP_root
+    os.makedirs(map_root, exist_ok=True)
+
+    # Set nnUNet environment variables if provided
+    if args.nnUNet_results:
+        os.environ["nnUNet_results"] = args.nnUNet_results
+        print(f"Set nnUNet_results to: {args.nnUNet_results}")
+
+    # Check if required environment variables are set
+    required_env_vars = ["nnUNet_results"]
+    missing_vars = [var for var in required_env_vars if var not in os.environ]
+
+    if missing_vars:
+        print(f"Error: The following required nnUNet environment variables are not set: {', '.join(missing_vars)}")
+        print("Please provide them as arguments or set them in your environment before running this script.")
+        sys.exit(1)
+
+    print(f"Converting nnUNet checkpoints for dataset {nnunet_config['dataset_name_or_id']} to MONAI bundle format...")
+    print(f"MAP will be created at: {map_root}")
+    print(f"  nnUNet_results: {os.environ.get('nnUNet_results')}")
+
+    # Convert the nnUNet checkpoints to MONAI bundle format
+    try:
+        convert_best_nnunet_to_monai_bundle(nnunet_config, map_root)
+        print(f"Successfully converted nnUNet checkpoints to MONAI bundle at: {map_root}/models")
+    except Exception as e:
+        print(f"Error converting nnUNet checkpoints: {e}")
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/development_notes.md b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/development_notes.md
new file mode 100644
index 00000000..5e647e38
--- /dev/null
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/development_notes.md
@@ -0,0 +1,62 @@
+# Development Notes
+
+## Implementation Notes for nnUNet MAP
+
+
+* Initial tests show volume and Dice agreement with the bundle; more thorough testing is needed.
+
+1. For each model configuration, the output gets written to a .npz file by the nnUNet inference functions.
+
+2. These file paths are then used by the EnsembleProbabilities transform to create the final output.
+
+3. If nnUNet postprocessing is used, use the largest connected component transform in the MAP. There could be minor differences in the implementation; a thorough analysis will follow.
+
+4. Need to better understand the use of "context" in compute and compute_impl as input arguments.
+
+5. Investigate keeping the probabilities in memory, to help with speedup.
+
+6. Need to investigate the current traceability provisions in the operators implemented.
+
+
+## Implementation Details
+
+### Testing Strategy
+
+Tests should be conducted to:
+1. Compare MAP output with native nnUNet output
+2. Measure performance (time, memory usage)
+3. Validate with various input formats and sizes
+4. Test error handling and edge cases
+
+
+### nnUNet Integration
+
+The current implementation relies on nnUNet's native inference approach, which outputs intermediate .npz files for each model configuration. While this works, it introduces file I/O overhead which could potentially be optimized.
+
+### Ensemble Prediction Flow
+
+1. Multiple nnUNet models (3d_fullres, 3d_lowres, 3d_cascade_fullres) are loaded
+2. Each model performs inference separately
+3. 
Results are written to temporary .npz files +4. EnsembleProbabilitiesToSegmentation transform reads these files +5. Final segmentation is produced by combining results + +### Potential Optimizations + +- Keep probability maps in memory instead of writing to disk +- Parallelize model inference where applicable +- Streamline the ensemble computation process + +### Context Usage + +The `context` parameter in `compute` and `compute_impl` functions appears to be used for storing and retrieving models. Further investigation is needed to fully understand how this context is managed and whether it's being used optimally. + +### Traceability + +Current traceability in the operators may need improvement. Consider adding: + +- More detailed logging +- Performance metrics +- Input/output validation steps +- Error handling with informative messages + diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__init__.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__init__.py new file mode 100644 index 00000000..52274b46 --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# __init__.py is used to initialize a Python package +# ensures that the directory __init__.py resides in is included at the start of the sys.path +# this is useful when you want to import modules from this directory, even if it’s not the +# directory where your Python script is running. + +# give access to operating system and Python interpreter +import os +import sys + +# grab absolute path of directory containing __init__.py +_current_dir = os.path.abspath(os.path.dirname(__file__)) + +# if sys.path is not the same as the directory containing the __init__.py file +if sys.path and os.path.abspath(sys.path[0]) != _current_dir: + # insert directory containing __init__.py file at the beginning of sys.path + sys.path.insert(0, _current_dir) +# delete variable +del _current_dir diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__main__.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__main__.py new file mode 100644 index 00000000..0a6920ed --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__main__.py @@ -0,0 +1,26 @@ +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# __main__.py is needed for MONAI Application Packager to detect the main app code (app.py) when +# app.py is executed in the application folder path +# e.g., python my_app + +import logging + +# import UTEAirwayNNUnetApp class from app.py +from app import UTEAirwayNNUnetApp + +# if __main__.py is being run directly +if __name__ == "__main__": + logging.info(f"Begin {__name__}") + # create and run an instance of UTEAirwayNNUnetApp + UTEAirwayNNUnetApp().run() + logging.info(f"End {__name__}") diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.py new file mode 100644 index 00000000..a944fada --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.py @@ -0,0 +1,257 @@ +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path + +# custom DICOMSCWriterOperator (Secondary Capture) +from dicom_sc_writer_operator import DICOMSCWriterOperator + +# custom DICOMSeriesSelectorOperator +from dicom_series_selector_operator import DICOMSeriesSelectorOperator + +# custom inference operator +from nnunet_seg_operator import NNUnetSegOperator + +# required for setting SegmentDescription attributes +# direct import as this is not part of App SDK package +from pydicom.sr.codedict import codes + +from monai.deploy.conditions import CountCondition +from monai.deploy.core import Application +from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator +from monai.deploy.operators.dicom_seg_writer_operator import DICOMSegmentationWriterOperator, SegmentDescription +from monai.deploy.operators.dicom_series_to_volume_operator import DICOMSeriesToVolumeOperator +from monai.deploy.operators.dicom_text_sr_writer_operator import DICOMTextSRWriterOperator, EquipmentInfo, ModelInfo + + +# inherit new Application class instance, AIAbdomenSegApp, from MONAI Application base class +# base class provides support for chaining up operators and executing application +class UTEAirwayNNUnetApp(Application): + """Demonstrates inference with nnU-Net ensemble models for airway segmentation. + + This application loads a set of DICOM instances, selects the appropriate series, converts the series to + 3D volume image, performs inference with the NNUnetSegOperator, including pre-processing + and post-processing, saves a DICOM SEG (airway contour), a DICOM Secondary Capture (airway contour overlay), + and a DICOM SR (airway volume). + + Pertinent MONAI Bundle: + This MAP is designed to work with a MONAI bundle compatible with nnU-Net. 
+ """ + + def __init__(self, *args, **kwargs): + """Creates an application instance.""" + self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__)) + super().__init__(*args, **kwargs) + + def run(self, *args, **kwargs): + # this method calls the base class to run; can be omitted if simply calling through + self._logger.info(f"Begin {self.run.__name__}") + super().run(*args, **kwargs) + self._logger.info(f"End {self.run.__name__}") + + # use compose method to instantiate operators and connect them to form a Directed Acyclic Graph (DAG) + def compose(self): + """Creates the app specific operators and chain them up in the processing DAG.""" + + logging.info(f"Begin {self.compose.__name__}") + + # use Commandline options over environment variables to init context + app_context = Application.init_app_context(self.argv) + app_input_path = Path(app_context.input_path) + app_output_path = Path(app_context.output_path) + model_path = Path(app_context.model_path) + + # Temporary bug fix for MAP execution where model path copy is messed up - need fix to app-sdk package function + # Check if the model_path has a subfolder named 'models' and set model_path to that subfolder if it exists + models_subfolder = model_path / "models" + if models_subfolder.exists() and models_subfolder.is_dir(): + self._logger.info(f"Found 'models' subfolder in {model_path}. Setting model_path to {models_subfolder}") + model_path = models_subfolder + + # create the custom operator(s) as well as SDK built-in operator(s) + # DICOM Data Loader op + study_loader_op = DICOMDataLoaderOperator( + self, CountCondition(self, 1), input_folder=app_input_path, name="study_loader_op" + ) + + # custom DICOM Series Selector op + # all_matched and sort_by_sop_instance_count = True; want all series that meet the selection criteria + # to be matched, and SOP sorting + series_selector_op = DICOMSeriesSelectorOperator( + self, rules=Sample_Rules_Text, all_matched=True, sort_by_sop_instance_count=True, name="series_selector_op" + ) + + # DICOM Series to Volume op + series_to_vol_op = DICOMSeriesToVolumeOperator(self, name="series_to_vol_op") + + # custom inference op + # output_labels specifies which of the organ segmentations are desired in the DICOM SEG, DICOM SC, and DICOM SR outputs + # 1 = airway + output_labels = [1] + nnunet_seg_op = NNUnetSegOperator( + self, + app_context=app_context, + model_path=model_path, + output_folder=app_output_path, + output_labels=output_labels, + name="nnunet_seg_op", + ) + + # create DICOM Seg writer providing the required segment description for each segment with + # the actual algorithm and the pertinent organ/tissue; the segment_label, algorithm_name, + # and algorithm_version are of DICOM VR LO type, limited to 64 chars + # https://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html + + # general algorithm information + _algorithm_name = "UTE_nnunet_airway" + _algorithm_family = codes.DCM.ArtificialIntelligence + _algorithm_version = "1.0.0" + + segment_descriptions = [ + SegmentDescription( + segment_label="Airway", + segmented_property_category=codes.SCT.BodyStructure, + segmented_property_type=codes.SCT.TracheaAndBronchus, + algorithm_name=_algorithm_name, + algorithm_family=_algorithm_family, + algorithm_version=_algorithm_version, + ), + ] + + # model info is algorithm information + my_model_info = ModelInfo( + creator="UTE", # institution name + name=_algorithm_name, # algorithm name + version=_algorithm_version, # algorithm version + uid="1.0.0", # MAP 
version
+        )
+
+        # equipment info is MONAI Deploy App SDK information
+        my_equipment = EquipmentInfo(
+            manufacturer="The MONAI Consortium",
+            manufacturer_model="MONAI Deploy App SDK",
+            software_version_number="3.0.0",  # MONAI Deploy App SDK version
+        )
+
+        # custom tags - add AlgorithmName for monitoring purposes
+        custom_tags_seg = {
+            "SeriesDescription": "AI Generated DICOM SEG; Not for Clinical Use.",
+            "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}",
+        }
+        custom_tags_sr = {
+            "SeriesDescription": "AI Generated DICOM SR; Not for Clinical Use.",
+            "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}",
+        }
+        custom_tags_sc = {
+            "SeriesDescription": "AI Generated DICOM Secondary Capture; Not for Clinical Use.",
+            "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}",
+        }
+
+        # DICOM SEG Writer op writes content from segment_descriptions to output DICOM images as DICOM tags
+        dicom_seg_writer = DICOMSegmentationWriterOperator(
+            self,
+            segment_descriptions=segment_descriptions,
+            model_info=my_model_info,
+            custom_tags=custom_tags_seg,
+            # store DICOM SEG in SEG subdirectory; necessary for routing in CCHMC MDE workflow definition
+            output_folder=app_output_path / "SEG",
+            # omit_empty_frames is a default parameter (type bool) of DICOMSegmentationWriterOperator
+            # dictates whether or not to omit frames that contain no segmented pixels from the output segmentation
+            # default value is True; changed to False to ensure input and output DICOM series #'s match
+            omit_empty_frames=False,
+            name="dicom_seg_writer",
+        )
+
+        # DICOM SR Writer op
+        dicom_sr_writer = DICOMTextSRWriterOperator(
+            self,
+            # copy_tags is a default parameter (type bool) of DICOMTextSRWriterOperator; default value is True
+            # dictates whether or not to copy DICOM attributes from the selected DICOM series
+            # kept as True to copy DICOM attributes so the DICOM SR has the same Study UID
+            copy_tags=True,
+            model_info=my_model_info,
+            equipment_info=my_equipment,
+            custom_tags=custom_tags_sr,
+            # store DICOM SR in SR subdirectory; necessary for routing in CCHMC MDE workflow definition
+            output_folder=app_output_path / "SR",
+        )
+
+        # custom DICOM SC Writer op
+        dicom_sc_writer = DICOMSCWriterOperator(
+            self,
+            model_info=my_model_info,
+            equipment_info=my_equipment,
+            custom_tags=custom_tags_sc,
+            # store DICOM SC in SC subdirectory; necessary for routing in CCHMC MDE workflow definition
+            output_folder=app_output_path / "SC",
+        )
+
+        # create the processing pipeline, by specifying the source and destination operators, and
+        # ensuring the output from the former matches the input of the latter, in both name and type
+        # instantiate and connect operators using self.add_flow(); specify current operator, next operator, and tuple to match I/O
+        self.add_flow(study_loader_op, series_selector_op, {("dicom_study_list", "dicom_study_list")})
+        self.add_flow(
+            series_selector_op, series_to_vol_op, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(series_to_vol_op, nnunet_seg_op, {("image", "image")})
+
+        # note below the dicom_seg_writer, dicom_sr_writer, and dicom_sc_writer each require two inputs,
+        # each coming from a source operator
+
+        # DICOM SEG
+        self.add_flow(
+            series_selector_op, dicom_seg_writer, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(nnunet_seg_op, dicom_seg_writer, {("seg_image", "seg_image")})
+
+        # DICOM SR
+        self.add_flow(
series_selector_op, dicom_sr_writer, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(nnunet_seg_op, dicom_sr_writer, {("result_text", "text")})
+
+        # DICOM SC
+        self.add_flow(
+            series_selector_op, dicom_sc_writer, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(nnunet_seg_op, dicom_sc_writer, {("dicom_sc_dir", "dicom_sc_dir")})
+
+        logging.info(f"End {self.compose.__name__}")
+
+
+# series selection rule in JSON, which selects for Axial T2 MR series:
+# StudyDescription (Type 3): matches any value
+# Modality (Type 1): matches "MR" value (case-insensitive); filters out non-MR modalities
+# ImageOrientationPatient (Type 1): matches Axial orientations; filters out Sagittal and Coronal orientations
+# MRAcquisitionType (Type 2): matches "2D" value (case-insensitive); filters out 3D acquisitions
+# RepetitionTime (Type 2C): matches values greater than 1200; filters for T2 acquisitions
+# EchoTime (Type 2): matches values between 75 and 100 (inclusive); filters out SSH series
+# EchoTrainLength (Type 2): matches values less than 50; filters out SSH series
+# FlipAngle (Type 3): matches values greater than 75; filters for T2 acquisitions
+# all valid series will be selected; downstream operators only perform inference and write outputs for the 1st selected series
+# please see more detail in DICOMSeriesSelectorOperator

+Sample_Rules_Text = """
+"""
+
+# if executing application code using python interpreter:
+if __name__ == "__main__":
+    # creates the app and tests it standalone; when running in this mode, please note the following:
+    #     -m , for model file path
+    #     -i , for input DICOM MR series folder
+    #     -o , for the output folder, default $PWD/output
+    # e.g.
+    #     monai-deploy exec app.py -i input -m model/ls_swinunetr_FT.pt
+    #
+    logging.info(f"Begin {__name__}")
+    UTEAirwayNNUnetApp().run()
+    logging.info(f"End {__name__}")
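Note that `Sample_Rules_Text` above is left empty, which typically means every series is matched. For illustration, the sketch below shows what a rules string could look like, using the JSON layout of the SDK's stock `DICOMSeriesSelectorOperator` (a `selections` list with per-series `conditions`); the numeric range conditions described in the comments (RepetitionTime, EchoTime, etc.) depend on the custom operator's own syntax and are deliberately omitted.

```python
# A hypothetical example of the rules JSON; only the regex-style conditions of the
# stock DICOMSeriesSelectorOperator are shown, since the custom operator's
# numeric-comparison syntax is not reproduced in this excerpt.
Example_Rules_Text = """
{
    "selections": [
        {
            "name": "Axial T2 MR Series",
            "conditions": {
                "StudyDescription": "(.*?)",
                "Modality": "(?i)MR",
                "MRAcquisitionType": "(?i)2D"
            }
        }
    ]
}
"""
```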
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.yaml b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.yaml
new file mode 100644
index 00000000..bd94c326
--- /dev/null
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.yaml
@@ -0,0 +1,34 @@
+# Copyright 2021-2025 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# app.yaml is a configuration file that specifies MAP settings
+# used by MONAI App SDK to understand how to run our app in a MAP and what resources it needs
+
+# specifies high-level information about our app
+application:
+  title: MONAI Deploy App Package - CCHMC Pediatric Airway Segmentation using nnUNet
+  description: This application segments the airway from an MRI scan using a trained nnUNet model
+  version: 0.0.1
+  inputFormats: ["file"]
+  outputFormats: ["file"]
+
+# specifies the resources our app needs to run
+# per MONAI docs (https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/developing_with_sdk/executing_packaged_app_locally.html)
+# MAR does not validate all of the resource requirements embedded in the MAP to ensure they are met in the host system
+# e.g., MAR will throw an error if the gpu requirement is not met on the host system; however, the gpuMemory parameter doesn't appear to be validated
+resources:
+  cpu: 4
+  gpu: 1
+  memory: 4Gi
+  # during MAP execution, for an input DICOM Series of 72 instances, GPU usage peaks at just under 8100 MiB ~= 8.5 GB ~= 7.9 Gi
+  gpuMemory: 8Gi
+ 
\ No newline at end of file
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_sc_writer_operator.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_sc_writer_operator.py
new file mode 100644
index 00000000..ce37f327
--- /dev/null
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_sc_writer_operator.py
@@ -0,0 +1,253 @@
+# Copyright 2021-2025 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+from pathlib import Path
+from typing import Dict, Optional, Union
+
+import pydicom
+
+from monai.deploy.core import Fragment, Operator, OperatorSpec
+from monai.deploy.core.domain.dicom_series import DICOMSeries
+from monai.deploy.core.domain.dicom_series_selection import StudySelectedSeries
+from monai.deploy.operators.dicom_utils import EquipmentInfo, ModelInfo, write_common_modules
+from monai.deploy.utils.importutil import optional_import
+from monai.deploy.utils.version import get_sdk_semver
+
+dcmread, _ = optional_import("pydicom", name="dcmread")
+dcmwrite, _ = optional_import("pydicom.filewriter", name="dcmwrite")
+generate_uid, _ = optional_import("pydicom.uid", name="generate_uid")
+ImplicitVRLittleEndian, _ = optional_import("pydicom.uid", name="ImplicitVRLittleEndian")
+Dataset, _ = optional_import("pydicom.dataset", name="Dataset")
+FileDataset, _ = optional_import("pydicom.dataset", name="FileDataset")
+Sequence, _ = optional_import("pydicom.sequence", name="Sequence")
+
+
+class DICOMSCWriterOperator(Operator):
+    """Class to write a new DICOM Secondary Capture (DICOM SC) instance with source DICOM Series metadata included.
+
+    Named inputs:
+        dicom_sc_dir: file path of temporary DICOM SC (w/o source DICOM Series metadata).
+        study_selected_series_list: DICOM Series for copying metadata from.
+
+    Named output:
+        None.
+ + File output: + New, updated DICOM SC file (with source DICOM Series metadata) in the provided output folder. + """ + + # file extension for the generated DICOM Part 10 file + DCM_EXTENSION = ".dcm" + # the default output folder for saving the generated DICOM instance file + # DEFAULT_OUTPUT_FOLDER = Path(os.path.join(os.path.dirname(__file__))) / "output" + DEFAULT_OUTPUT_FOLDER = Path.cwd() / "output" + + def __init__( + self, + fragment: Fragment, + *args, + output_folder: Union[str, Path], + model_info: ModelInfo, + equipment_info: Optional[EquipmentInfo] = None, + custom_tags: Optional[Dict[str, str]] = None, + **kwargs, + ): + """Class to write a new DICOM Secondary Capture (DICOM SC) instance with source DICOM Series metadata. + + Args: + output_folder (str or Path): The folder for saving the generated DICOM SC instance file. + model_info (ModelInfo): Object encapsulating model creator, name, version and UID. + equipment_info (EquipmentInfo, optional): Object encapsulating info for DICOM Equipment Module. + Defaults to None. + custom_tags (Dict[str, str], optional): Dictionary for setting custom DICOM tags using Keywords and str values only. + Defaults to None. + + Raises: + ValueError: If result cannot be found either in memory or from file. + """ + + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + + # need to init the output folder until the execution context supports dynamic FS path + # not trying to create the folder to avoid exception on init + self.output_folder = Path(output_folder) if output_folder else DICOMSCWriterOperator.DEFAULT_OUTPUT_FOLDER + self.input_name_sc_dir = "dicom_sc_dir" + self.input_name_study_series = "study_selected_series_list" + + # for copying DICOM attributes from a provided DICOMSeries + # required input for write_common_modules; will always be True for this implementation + self.copy_tags = True + + self.model_info = model_info if model_info else ModelInfo() + self.equipment_info = equipment_info if equipment_info else EquipmentInfo() + self.custom_tags = custom_tags + + # set own Modality and SOP Class UID + # Standard SOP Classes: https://dicom.nema.org/dicom/2013/output/chtml/part04/sect_B.5.html + # Modality, e.g., + # "OT" for PDF + # "SR" for Structured Report. + # Media Storage SOP Class UID, e.g., + # "1.2.840.10008.5.1.4.1.1.88.11" for Basic Text SR Storage + # "1.2.840.10008.5.1.4.1.1.104.1" for Encapsulated PDF Storage, + # "1.2.840.10008.5.1.4.1.1.88.34" for Comprehensive 3D SR IOD + # "1.2.840.10008.5.1.4.1.1.66.4" for Segmentation Storage + self.modality_type = "OT" # OT Modality for Secondary Capture + self.sop_class_uid = ( + "1.2.840.10008.5.1.4.1.1.7.4" # SOP Class UID for Multi-frame True Color Secondary Capture Image Storage + ) + # custom OverlayImageLabeld post-processing transform creates an RBG overlay + + # equipment version may be different from contributing equipment version + try: + self.software_version_number = get_sdk_semver() # SDK Version + except Exception: + self.software_version_number = "" + self.operators_name = f"AI Algorithm {self.model_info.name}" + + super().__init__(fragment, *args, **kwargs) + + def setup(self, spec: OperatorSpec): + """Set up the named input(s), and output(s) if applicable. + + This operator does not have an output for the next operator, rather file output only. + + Args: + spec (OperatorSpec): The Operator specification for inputs and outputs etc. 
+ """ + + spec.input(self.input_name_sc_dir) + spec.input(self.input_name_study_series) + + def compute(self, op_input, op_output, context): + """Performs computation for this operator and handles I/O. + + For now, only a single result content is supported, which could be in memory or an accessible file. + The DICOM Series used during inference is required (and copy_tags is hardcoded to True). + + When there are multiple selected series in the input, the first series' containing study will + be used for retrieving DICOM Study module attributes, e.g. StudyInstanceUID. + + Raises: + NotADirectoryError: When temporary DICOM SC path is not a directory. + FileNotFoundError: When result object not in the input, and result file not found either. + ValueError: Content object and file path not in the inputs, or no DICOM series provided. + IOError: If the input content is blank. + """ + + # receive the temporary DICOM SC file path and study selected series list + dicom_sc_dir = Path(op_input.receive(self.input_name_sc_dir)) + if not dicom_sc_dir: + raise IOError("Temporary DICOM SC path is read but blank.") + if not dicom_sc_dir.is_dir(): + raise NotADirectoryError(f"Provided temporary DICOM SC path is not a directory: {dicom_sc_dir}") + self._logger.info(f"Received temporary DICOM SC path: {dicom_sc_dir}") + + study_selected_series_list = op_input.receive(self.input_name_study_series) + if not study_selected_series_list or len(study_selected_series_list) < 1: + raise ValueError("Missing input, list of 'StudySelectedSeries'.") + + # retrieve the DICOM Series used during inference in order to grab appropriate study/series level tags + # this will be the 1st Series in study_selected_series_list + dicom_series = None + for study_selected_series in study_selected_series_list: + if not isinstance(study_selected_series, StudySelectedSeries): + raise ValueError(f"Element in input is not expected type, {StudySelectedSeries}.") + selected_series = study_selected_series.selected_series[0] + dicom_series = selected_series.series + break + + # log basic DICOM metadata for the retrieved DICOM Series + self._logger.debug(f"Dicom Series: {dicom_series}") + + # the output folder should come from the execution context when it is supported + self.output_folder.mkdir(parents=True, exist_ok=True) + + # write the new DICOM SC instance + self.write(dicom_sc_dir, dicom_series, self.output_folder) + + def write(self, dicom_sc_dir, dicom_series: DICOMSeries, output_dir: Path): + """Writes a new, updated DICOM SC instance and deletes the temporary DICOM SC instance. + The new, updated DICOM SC instance is the temporary DICOM SC instance with source + DICOM Series metadata copied. + + Args: + dicom_sc_dir: temporary DICOM SC file path. + dicom_series (DICOMSeries): DICOMSeries object encapsulating the original series. + + Returns: + None + + File output: + New, updated DICOM SC file (with source DICOM Series metadata) in the provided output folder. 
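+
+        Note:
+            The ``dicom_sc_dir`` folder is expected to contain exactly one ``.dcm`` file;
+            the first file found is loaded and updated.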
+ """ + + if not isinstance(output_dir, Path): + raise ValueError("output_dir is not a valid Path.") + + output_dir.mkdir(parents=True, exist_ok=True) # just in case + + # find the temporary DICOM SC file in the directory; there should only be one .dcm file present + dicom_files = list(dicom_sc_dir.glob("*.dcm")) + dicom_sc_file = dicom_files[0] + + # load the temporary DICOM SC file using pydicom + dicom_sc_dataset = pydicom.dcmread(dicom_sc_file) + self._logger.info(f"Loaded temporary DICOM SC file: {dicom_sc_file}") + + # use write_common_modules to copy metadata from dicom_series + # this will copy metadata and return an updated Dataset + ds = write_common_modules( + dicom_series, + self.copy_tags, # always True for this implementation + self.modality_type, + self.sop_class_uid, + self.model_info, + self.equipment_info, + ) + + # Secondary Capture specific tags + ds.ImageType = ["DERIVED", "SECONDARY"] + + # for now, only allow str Keywords and str value + if self.custom_tags: + for k, v in self.custom_tags.items(): + if isinstance(k, str) and isinstance(v, str): + try: + ds.update({k: v}) + except Exception as ex: + # best effort for now + logging.warning(f"Tag {k} was not written, due to {ex}") + + # merge the copied metadata into the loaded temporary DICOM SC file (dicom_sc_dataset) + for tag, value in ds.items(): + dicom_sc_dataset[tag] = value + + # save the updated DICOM SC file to the output folder + # instance file name is the same as the new SOP instance UID + output_file_path = self.output_folder.joinpath( + f"{dicom_sc_dataset.SOPInstanceUID}{DICOMSCWriterOperator.DCM_EXTENSION}" + ) + dicom_sc_dataset.save_as(output_file_path) + self._logger.info(f"Saved updated DICOM SC file at: {output_file_path}") + + # remove the temporary DICOM SC file + os.remove(dicom_sc_file) + self._logger.info(f"Removed temporary DICOM SC file: {dicom_sc_file}") + + # check if the temp directory is empty, then delete it + if not any(dicom_sc_dir.iterdir()): + os.rmdir(dicom_sc_dir) + self._logger.info(f"Removed temporary directory: {dicom_sc_dir}") + else: + self._logger.warning(f"Temporary directory {dicom_sc_dir} is not empty, skipping removal.") diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_series_selector_operator.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_series_selector_operator.py new file mode 100644 index 00000000..ced61ea9 --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_series_selector_operator.py @@ -0,0 +1,629 @@ +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
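+
+# Note: this module is a local variant of the MONAI Deploy SDK's DICOMSeriesSelectorOperator,
+# bundled with the app so it can carry the matching features described in the class docstring
+# below: relational/range matching for numeric attributes, inclusion/exclusion matching for
+# multi-value attributes, and image orientation matching via the ImageOrientationPatient tag.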
+ +import logging +import numbers +import re +from json import loads as json_loads +from typing import List + +import numpy as np + +from monai.deploy.core import ConditionType, Fragment, Operator, OperatorSpec +from monai.deploy.core.domain.dicom_series import DICOMSeries +from monai.deploy.core.domain.dicom_series_selection import SelectedSeries, StudySelectedSeries +from monai.deploy.core.domain.dicom_study import DICOMStudy + + +class DICOMSeriesSelectorOperator(Operator): + """This operator selects a list of DICOM Series in a DICOM Study for a given set of selection rules. + + Named input: + dicom_study_list: A list of DICOMStudy objects. + + Named output: + study_selected_series_list: A list of StudySelectedSeries objects. Downstream receiver optional. + + This class can be considered a base class, and a derived class can override the 'filter' function with + custom logic. + + In its default implementation, this class + 1. selects a series or all matched series within the scope of a study in a list of studies + 2. uses rules defined in JSON string, see below for details + 3. supports DICOM Study and Series module attribute matching + 4. supports multiple named selections, in the scope of each DICOM study + 5. outputs a list of StudySelectedSeries objects, as well as a flat list of SelectedSeries (to be deprecated) + + The selection rules are defined in JSON, + 1. attribute "selections" value is a list of selections + 2. each selection has a "name", and its "conditions" value is a list of matching criteria + 3. each condition uses the implicit equal operator; in addition, the following are supported: + - regex, relational, and range matching for float and int types + - regex matching for str type + - inclusion and exclusion matching for set type + - image orientation check for the ImageOrientationPatient tag + 4. DICOM attribute keywords are used, and only for those defined as DICOMStudy and DICOMSeries properties + + An example selection rules: + { + "selections": [ + { + "name": "CT Series 1", + "conditions": { + "StudyDescription": "(?i)^Spleen", + "Modality": "(?i)CT", + "SeriesDescription": "(?i)^No series description|(.*?)" + } + }, + { + "name": "CT Series 2", + "conditions": { + "Modality": "CT", + "BodyPartExamined": "Abdomen", + "SeriesDescription" : "Not to be matched. For illustration only." + } + }, + { + "name": "CT Series 3", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageType": ["PRIMARY", "ORIGINAL", "AXIAL"], + "SliceThickness": [3, 5] + } + }, + { + "name": "CT Series 4", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageOrientationPatient": "Axial", + "SliceThickness": [2, ">"] + } + }, + { + "name": "CT Series 5", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageType": ["PRIMARY", "!SECONDARY"] + } + } + ] + } + """ + + def __init__( + self, + fragment: Fragment, + *args, + rules: str = "", + all_matched: bool = False, + sort_by_sop_instance_count: bool = False, + **kwargs, + ) -> None: + """Instantiate an instance. + + Args: + fragment (Fragment): An instance of the Application class which is derived from Fragment. + rules (Text): Selection rules in JSON string. + all_matched (bool): Gets all matched series in a study. Defaults to False for first match only. + sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in + descending SOP instance count (i.e. 
the first Series in the returned List[StudySelectedSeries] will have the highest # + of DICOM images); Defaults to False for no sorting. + """ + + # Delay loading the rules as JSON string till compute time. + self._rules_json_str = rules if rules and rules.strip() else None + self._all_matched = all_matched # all_matched + self._sort_by_sop_instance_count = sort_by_sop_instance_count # sort_by_sop_instance_count + self.input_name_study_list = "dicom_study_list" + self.output_name_selected_series = "study_selected_series_list" + + super().__init__(fragment, *args, **kwargs) + + def setup(self, spec: OperatorSpec): + spec.input(self.input_name_study_list) + spec.output(self.output_name_selected_series).condition(ConditionType.NONE) # Receiver optional + + # Can use the config file to alter the selection rules per app run + # spec.param("selection_rules") + + def compute(self, op_input, op_output, context): + """Performs computation for this operator.""" + + dicom_study_list = op_input.receive(self.input_name_study_list) + selection_rules = self._load_rules() if self._rules_json_str else None + study_selected_series = self.filter( + selection_rules, dicom_study_list, self._all_matched, self._sort_by_sop_instance_count + ) + + # Log Series Description and Series Instance UID of the first selected DICOM Series (i.e. the one to be used for inference) + if study_selected_series and len(study_selected_series) > 0: + inference_study = study_selected_series[0] + if inference_study.selected_series and len(inference_study.selected_series) > 0: + inference_series = inference_study.selected_series[0].series + logging.info("Series Selection finalized") + logging.info( + f"Series Description of selected DICOM Series for inference: {inference_series.SeriesDescription}" + ) + logging.info( + f"Series Instance UID of selected DICOM Series for inference: {inference_series.SeriesInstanceUID}" + ) + + op_output.emit(study_selected_series, self.output_name_selected_series) + + def filter( + self, selection_rules, dicom_study_list, all_matched: bool = False, sort_by_sop_instance_count: bool = False + ) -> List[StudySelectedSeries]: + """Selects the series with the given matching rules. + + If rules object is None, all series will be returned with series instance UID as the selection name. + + Supported matching logic: + Float + Int: exact matching, relational matching, range matching, and regex matching + String: matches case insensitive, if fails then tries RegEx search + String array (set): inclusive and exclusive (via !) matching as subsets, case insensitive + ImageOrientationPatient tag: image orientation (Axial, Coronal, Sagittal) matching + + Args: + selection_rules (object): JSON object containing the matching rules. + dicom_study_list (list): A list of DICOMStudy objects. + all_matched (bool): Gets all matched series in a study. Defaults to False for first match only. + sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in + descending SOP instance count (i.e. the first Series in the returned List[StudySelectedSeries] will have the highest # + of DICOM images); Defaults to False for no sorting. + + Returns: + list: A list of objects of type StudySelectedSeries. + + Raises: + ValueError: If the selection_rules object does not contain "selections" attribute. 
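+
+        Example::
+
+            # A minimal, illustrative sketch (`selector` and `dicom_study_list` are assumed
+            # to exist); selects every CT series in each study.
+            rules = json_loads('{"selections": [{"name": "CT", "conditions": {"Modality": "(?i)CT"}}]}')
+            selected = selector.filter(rules, dicom_study_list, all_matched=True)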
+        """
+
+        if not dicom_study_list or len(dicom_study_list) < 1:
+            return []
+
+        if not selection_rules:
+            # Return all series if no selection rules are supplied
+            logging.warning("No selection rules given; selecting all series.")
+            return self._select_all_series(dicom_study_list)
+
+        selections = selection_rules.get("selections", None)  # selection_rules is a parsed dict here, not a JSON string
+        # If "selections" is missing from the rules then it is an error.
+        if not selections:
+            raise ValueError('Expected "selections" not found in the rules.')
+
+        study_selected_series_list = []  # List of StudySelectedSeries objects
+
+        for study in dicom_study_list:
+            study_selected_series = StudySelectedSeries(study)
+            for selection in selections:
+                # Get the selection name. Blank name will be handled by the SelectedSeries
+                selection_name = selection.get("name", "").strip()
+                logging.info(f"Finding series for Selection named: {selection_name}")
+
+                # Skip if no selection conditions are provided.
+                conditions = selection.get("conditions", None)
+                if not conditions:
+                    continue
+
+                # Select the matching series; a single-element list unless all_matched is True
+                series_list = self._select_series(conditions, study, all_matched, sort_by_sop_instance_count)
+                if series_list and len(series_list) > 0:
+                    for series in series_list:
+                        selected_series = SelectedSeries(selection_name, series, None)  # No Image obj yet.
+                        study_selected_series.add_selected_series(selected_series)
+
+            if len(study_selected_series.selected_series) > 0:
+                study_selected_series_list.append(study_selected_series)
+
+        return study_selected_series_list
+
+    def _load_rules(self):
+        return json_loads(self._rules_json_str) if self._rules_json_str else None
+
+    def _select_all_series(self, dicom_study_list: List[DICOMStudy]) -> List[StudySelectedSeries]:
+        """Selects all series in the given studies.
+
+        Returns:
+            list: list of StudySelectedSeries objects
+        """
+
+        study_selected_series_list = []
+        for study in dicom_study_list:
+            logging.info(f"Working on study, instance UID: {study.StudyInstanceUID}")
+            study_selected_series = StudySelectedSeries(study)
+            for series in study.get_all_series():
+                logging.info(f"Working on series, instance UID: {str(series.SeriesInstanceUID)}")
+                selected_series = SelectedSeries("", series, None)  # No selection name or Image obj.
+                study_selected_series.add_selected_series(selected_series)
+            study_selected_series_list.append(study_selected_series)
+        return study_selected_series_list
+
+    def _select_series(
+        self, attributes: dict, study: DICOMStudy, all_matched=False, sort_by_sop_instance_count=False
+    ) -> List[DICOMSeries]:
+        """Finds series whose attributes match the given attributes.
+
+        Args:
+            attributes (dict): Dictionary of attributes for matching
+            study (DICOMStudy): The study whose series are searched.
+            all_matched (bool): Gets all matched series in a study. Defaults to False for first match only.
+            sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in
+                descending SOP instance count (i.e. the first Series in the returned List[StudySelectedSeries] will have the highest #
+                of DICOM images); Defaults to False for no sorting.
+
+        Returns:
+            List of DICOMSeries. At most one element if all_matched is False.
+
+        Raises:
+            NotImplementedError: If the value_to_match type is not supported for matching or unsupported PatientPosition value.
+        """
+        assert isinstance(attributes, dict), '"attributes" must be a dict.'
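+
+        # Matching flow, implemented below: for each series, combine Study and Series
+        # properties; for attributes not found there (e.g. ImageType), fall back to the
+        # first SOP instance; then dispatch on the attribute's type to image orientation,
+        # numeric, string/regex, or multi-value list matching.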
+ + logging.info(f"Searching study, : {study.StudyInstanceUID}\n # of series: {len(study.get_all_series())}") + study_attr = self._get_instance_properties(study) + + found_series = [] + for series in study.get_all_series(): + logging.info(f"Working on series, instance UID: {series.SeriesInstanceUID}") + + # Combine Study and current Series properties for matching + series_attr = self._get_instance_properties(series) + series_attr.update(study_attr) + + matched = True + # Simple matching on attribute value + for key, value_to_match in attributes.items(): + logging.info(f" On attribute: {key!r} to match value: {value_to_match!r}") + # Ignore None + if not value_to_match: + continue + # Try getting the attribute value from Study and current Series prop dict + attr_value = series_attr.get(key, None) + logging.info(f" Series attribute {key} value: {attr_value}") + + # If not found, try the best at the native instance level for string VR + # This is mainly for attributes like ImageType + if not attr_value: + try: + # Can use some enhancements, especially multi-value where VM > 1 + elem = series.get_sop_instances()[0].get_native_sop_instance()[key] + if elem.VM > 1: + attr_value = [elem.repval] # repval: str representation of the element’s value + else: + attr_value = elem.value # element's value + + logging.info(f" Instance level attribute {key} value: {attr_value}") + series_attr.update({key: attr_value}) + except Exception: + logging.info(f" Attribute {key} not at instance level either") + + if not attr_value: + logging.info(f" Missing attribute: {key!r}") + matched = False + # Image orientation check + elif key == "ImageOrientationPatient": + patient_position = series_attr.get("PatientPosition") + if patient_position is None: + raise NotImplementedError( + "PatientPosition tag absent; value required for image orientation calculation" + ) + if patient_position not in ("HFP", "HFS", "HFDL", "HFDR", "FFP", "FFS", "FFDL", "FFDR"): + raise NotImplementedError(f"No support for PatientPosition value {patient_position}") + matched = self._match_image_orientation(value_to_match, attr_value) + elif isinstance(attr_value, (float, int)): + matched = self._match_numeric_condition(value_to_match, attr_value) + elif isinstance(attr_value, str): + matched = attr_value.casefold() == (value_to_match.casefold()) + if not matched: + # For str, also try RegEx search to check for a match anywhere in the string + # unless the user constrains it in the expression. + if re.search(value_to_match, attr_value, re.IGNORECASE): + matched = True + elif isinstance(attr_value, list): + # Assume multi value string attributes + meta_data_list = str(attr_value).lower() + if isinstance(value_to_match, list): + value_set = {str(element).lower() for element in value_to_match} + # split inclusion and exclusion matches using ! indicator + include_terms = {v for v in value_set if not v.startswith("!")} + exclude_terms = {v[1:] for v in value_set if v.startswith("!")} + matched = all(term in meta_data_list for term in include_terms) and all( + term not in meta_data_list for term in exclude_terms + ) + elif isinstance(value_to_match, (str, numbers.Number)): + v = str(value_to_match).lower() + # ! 
indicates exclusion match + if v.startswith("!"): + matched = v[1:] not in meta_data_list + else: + matched = v in meta_data_list + else: + raise NotImplementedError( + f"No support for matching condition {value_to_match} (type: {type(value_to_match)})" + ) + + if not matched: + logging.info("This series does not match the selection conditions") + break + + if matched: + logging.info(f"Selected Series, UID: {series.SeriesInstanceUID}") + found_series.append(series) + + if not all_matched: + return found_series + + # If sorting indicated and multiple series found, sort series in descending SOP instance count + if sort_by_sop_instance_count and len(found_series) > 1: + logging.info( + "Multiple series matched the selection criteria; choosing series with the highest number of DICOM images." + ) + found_series.sort(key=lambda x: len(x.get_sop_instances()), reverse=True) + + return found_series + + def _match_numeric_condition(self, value_to_match, attr_value): + """ + Helper method to match numeric conditions, supporting relational, inclusive range, regex, and exact match checks. + + Supported formats: + - [val, ">"]: match if attr_value > val + - [val, ">="]: match if attr_value >= val + - [val, "<"]: match if attr_value < val + - [val, "<="]: match if attr_value <= val + - [val, "!="]: match if attr_value != val + - [min_val, max_val]: inclusive range check + - "regex": regular expression match + - number: exact match + + Args: + value_to_match (Union[list, str, int, float]): The condition to match against. + attr_value (Union[int, float]): The attribute value from the series. + + Returns: + bool: True if the attribute value matches the condition, else False. + + Raises: + NotImplementedError: If the value_to_match condition is not supported for numeric matching. + """ + + if isinstance(value_to_match, list): + # Relational operator check: >, >=, <, <=, != + if len(value_to_match) == 2 and isinstance(value_to_match[1], str): + val = float(value_to_match[0]) + op = value_to_match[1] + if op == ">": + return attr_value > val + elif op == ">=": + return attr_value >= val + elif op == "<": + return attr_value < val + elif op == "<=": + return attr_value <= val + elif op == "!=": + return attr_value != val + else: + raise NotImplementedError( + f"Unsupported relational operator {op!r} in numeric condition. Must be one of: '>', '>=', '<', '<=', '!='" + ) + + # Inclusive range check + elif len(value_to_match) == 2 and all(isinstance(v, (int, float)) for v in value_to_match): + return value_to_match[0] <= attr_value <= value_to_match[1] + + else: + raise NotImplementedError(f"No support for numeric matching condition {value_to_match}") + + # Regular expression match + elif isinstance(value_to_match, str): + return bool(re.fullmatch(value_to_match, str(attr_value))) + + # Exact numeric match + elif isinstance(value_to_match, (int, float)): + return value_to_match == attr_value + + else: + raise NotImplementedError(f"No support for numeric matching on this type: {type(value_to_match)}") + + def _match_image_orientation(self, value_to_match, attr_value): + """ + Helper method to calculate and match the image orientation using the ImageOrientationPatient tag. 
The following PatientPosition values are supported and have been tested:
+        - "HFP"
+        - "HFS"
+        - "HFDL"
+        - "HFDR"
+        - "FFP"
+        - "FFS"
+        - "FFDL"
+        - "FFDR"
+
+        Supported image orientation inputs for matching (case-insensitive):
+        - "Axial"
+        - "Coronal"
+        - "Sagittal"
+
+        Args:
+            value_to_match (str): The image orientation condition to match against.
+            attr_value (List[str]): Raw ImageOrientationPatient tag value from the series.
+
+        Returns:
+            bool: True if the computed orientation matches the expected orientation, else False.
+
+        Raises:
+            ValueError: If the expected orientation is invalid or the normal vector cannot be computed.
+        """
+
+        # Validate the image orientation input to match
+        value_to_match = value_to_match.strip().lower().capitalize()
+        allowed_orientations = {"Axial", "Coronal", "Sagittal"}
+        if value_to_match not in allowed_orientations:
+            raise ValueError(f"Invalid orientation string {value_to_match!r}. Must be one of: {allowed_orientations}")
+
+        # Format ImageOrientationPatient tag value as an array and grab row and column cosines
+        iop_str = attr_value[0].strip("[]")
+        iop = [float(x.strip()) for x in iop_str.split(",")]
+        row_cosines = np.array(iop[:3], dtype=np.float64)
+        col_cosines = np.array(iop[3:], dtype=np.float64)
+
+        # Validate DICOM constraints (row and column cosines should be unit length and orthogonal);
+        # log warnings if the tolerance is exceeded
+        tolerance = 1e-4
+        row_norm = np.linalg.norm(row_cosines)
+        col_norm = np.linalg.norm(col_cosines)
+        dot_product = np.dot(row_cosines, col_cosines)
+
+        if abs(row_norm - 1.0) > tolerance:
+            logging.warning(f"Row direction cosine norm is {row_norm}, deviates from 1 by more than {tolerance}")
+        if abs(col_norm - 1.0) > tolerance:
+            logging.warning(f"Column direction cosine norm is {col_norm}, deviates from 1 by more than {tolerance}")
+        if abs(dot_product) > tolerance:
+            logging.warning(f"Row and Column cosines are not orthogonal: dot product = {dot_product}")
+
+        # Normalize row and column vectors
+        row_cosines /= np.linalg.norm(row_cosines)
+        col_cosines /= np.linalg.norm(col_cosines)
+
+        # Compute and validate the slice normal
+        normal = np.cross(row_cosines, col_cosines)
+        if np.linalg.norm(normal) == 0:
+            raise ValueError("Invalid normal vector computed from IOP")
+
+        # Normalize the slice normal
+        normal /= np.linalg.norm(normal)
+
+        # Identify the dominant image orientation
+        axis_labels = ["Sagittal", "Coronal", "Axial"]
+        major_axis = np.argmax(np.abs(normal))
+        computed_orientation = axis_labels[major_axis]
+
+        logging.info(f"  Computed orientation from ImageOrientationPatient value: {computed_orientation}")
+
+        return bool(computed_orientation == value_to_match)
+
+    @staticmethod
+    def _get_instance_properties(obj: object):
+        if not obj:
+            return {}
+        else:
+            return {x: getattr(obj, x, None) for x in type(obj).__dict__ if isinstance(type(obj).__dict__[x], property)}
+
+
+# Module functions
+# Helper function to get console output of the selection content when testing the script
+def _print_instance_properties(obj: object, pre_fix: str = "", print_val=True):
+    print(f"{pre_fix}Instance of {type(obj)}")
+    for attribute in [x for x in type(obj).__dict__ if isinstance(type(obj).__dict__[x], property)]:
+        attr_val = getattr(obj, attribute, None)
+        print(f"{pre_fix}  {attribute}: {type(attr_val)} {attr_val if print_val else ''}")
+
+
+def test():
+    from pathlib import Path
+
+    from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator
+
+    current_file_dir = Path(__file__).parent.resolve()
+    data_path = 
current_file_dir.joinpath("../../../inputs/spleen_ct/dcm").absolute() + + fragment = Fragment() + loader = DICOMDataLoaderOperator(fragment, name="loader_op") + selector = DICOMSeriesSelectorOperator(fragment, name="selector_op") + study_list = loader.load_data_to_studies(data_path) + sample_selection_rule = json_loads(Sample_Rules_Text) + print(f"Selection rules in JSON:\n{sample_selection_rule}") + study_selected_series_list = selector.filter(sample_selection_rule, study_list) + + for sss_obj in study_selected_series_list: + _print_instance_properties(sss_obj, pre_fix="", print_val=False) + study = sss_obj.study + pre_fix = " " + print(f"{pre_fix}==== Details of the study ====") + _print_instance_properties(study, pre_fix, print_val=False) + print(f"{pre_fix}==============================") + + # The following commented code block accesses and prints the flat list of all selected series. + # for ss_obj in sss_obj.selected_series: + # pre_fix = " " + # _print_instance_properties(ss_obj, pre_fix, print_val=False) + # pre_fix = " " + # print(f"{pre_fix}==== Details of the series ====") + # _print_instance_properties(ss_obj, pre_fix) + # print(f"{pre_fix}===============================") + + # The following block uses hierarchical grouping by selection name, and prints the list of series for each. + for selection_name, ss_list in sss_obj.series_by_selection_name.items(): + pre_fix = " " + print(f"{pre_fix}Selection name: {selection_name}") + for ss_obj in ss_list: + pre_fix = " " + _print_instance_properties(ss_obj, pre_fix, print_val=False) + print(f"{pre_fix}==== Details of the series ====") + _print_instance_properties(ss_obj, pre_fix) + print(f"{pre_fix}===============================") + + print(f" A total of {len(sss_obj.selected_series)} series selected for study {study.StudyInstanceUID}") + + +# Sample rule used for testing +Sample_Rules_Text = """ +{ + "selections": [ + { + "name": "CT Series 1", + "conditions": { + "StudyDescription": "(?i)^Spleen", + "Modality": "(?i)CT", + "SeriesDescription": "(?i)^No series description|(.*?)" + } + }, + { + "name": "CT Series 2", + "conditions": { + "Modality": "CT", + "BodyPartExamined": "Abdomen", + "SeriesDescription" : "Not to be matched" + } + }, + { + "name": "CT Series 3", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageType": ["PRIMARY", "ORIGINAL", "AXIAL"], + "SliceThickness": [3, 5] + } + }, + { + "name": "CT Series 4", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)MR", + "ImageOrientationPatient": "Axial", + "SliceThickness": [2, ">"] + } + }, + { + "name": "CT Series 5", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageType": ["PRIMARY", "!SECONDARY"] + } + } + ] +} +""" + +if __name__ == "__main__": + test() diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py new file mode 100644 index 00000000..712be41a --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py @@ -0,0 +1,995 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import shutil +from pathlib import Path +from typing import Any, Optional, Tuple, Union + +import numpy as np +import torch +from torch.backends import cudnn + +from monai.data.meta_tensor import MetaTensor +from monai.utils import optional_import + +join, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="join") +load_json, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="load_json") + +__all__ = [ + "get_nnunet_trainer", + "get_nnunet_monai_predictor", + "get_network_from_nnunet_plans", + "convert_nnunet_to_monai_bundle", + "convert_monai_bundle_to_nnunet", + "ModelnnUNetWrapper", + "EnsembleProbabilitiesToSegmentation", +] + +# Constants +NNUNET_CHECKPOINT_FILENAME = "nnunet_checkpoint.pth" +PLANS_JSON_FILENAME = "plans.json" +DATASET_JSON_FILENAME = "dataset.json" + + +# Convert a single nnUNet model checkpoint to MONAI bundle format +# The function saves the converted model checkpoint and configuration files in the specified bundle root folder. +def convert_nnunet_to_monai_bundle(nnunet_config: dict, bundle_root_folder: str, fold: int = 0) -> None: + """ + Convert nnUNet model checkpoints and configuration to MONAI bundle format. + + Parameters + ---------- + nnunet_config : dict + Configuration dictionary for nnUNet, containing keys such as 'dataset_name_or_id', 'nnunet_configuration', + 'nnunet_trainer', and 'nnunet_plans'. + bundle_root_folder : str + Root folder where the MONAI bundle will be saved. + fold : int, optional + Fold number of the nnUNet model to be converted, by default 0. 
+ + Returns + ------- + None + """ + + nnunet_trainer = "nnUNetTrainer" + nnunet_plans = "nnUNetPlans" + nnunet_configuration = "3d_fullres" + + if "nnunet_trainer" in nnunet_config: + nnunet_trainer = nnunet_config["nnunet_trainer"] + + if "nnunet_plans" in nnunet_config: + nnunet_plans = nnunet_config["nnunet_plans"] + + if "nnunet_configuration" in nnunet_config: + nnunet_configuration = nnunet_config["nnunet_configuration"] + + from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + dataset_name = maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"]) + nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath( + dataset_name, f"{nnunet_trainer}__{nnunet_plans}__{nnunet_configuration}" + ) + + nnunet_checkpoint_final = torch.load( + Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_final.pth"), weights_only=False + ) + nnunet_checkpoint_best = torch.load( + Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_best.pth"), weights_only=False + ) + + nnunet_checkpoint = {} + nnunet_checkpoint["inference_allowed_mirroring_axes"] = nnunet_checkpoint_final["inference_allowed_mirroring_axes"] + nnunet_checkpoint["init_args"] = nnunet_checkpoint_final["init_args"] + nnunet_checkpoint["trainer_name"] = nnunet_checkpoint_final["trainer_name"] + + Path(bundle_root_folder).joinpath("models", nnunet_configuration).mkdir(parents=True, exist_ok=True) + + torch.save( + nnunet_checkpoint, Path(bundle_root_folder).joinpath("models", nnunet_configuration, NNUNET_CHECKPOINT_FILENAME) + ) + + Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}").mkdir(parents=True, exist_ok=True) + # This might not be needed, comment it out for now + # monai_last_checkpoint = {} + # monai_last_checkpoint["network_weights"] = nnunet_checkpoint_final["network_weights"] + # torch.save(monai_last_checkpoint, Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}", "model.pt")) + + monai_best_checkpoint = {} + monai_best_checkpoint["network_weights"] = nnunet_checkpoint_best["network_weights"] + torch.save( + monai_best_checkpoint, + Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}", "best_model.pt"), + ) + + if not os.path.exists(os.path.join(bundle_root_folder, "models", "jsonpkls", PLANS_JSON_FILENAME)): + shutil.copy( + Path(nnunet_model_folder).joinpath(PLANS_JSON_FILENAME), + Path(bundle_root_folder).joinpath("models", "jsonpkls", PLANS_JSON_FILENAME), + ) + + if not os.path.exists(os.path.join(bundle_root_folder, "models", "jsonpkls", DATASET_JSON_FILENAME)): + shutil.copy( + Path(nnunet_model_folder).joinpath(DATASET_JSON_FILENAME), + Path(bundle_root_folder).joinpath("models", "jsonpkls", DATASET_JSON_FILENAME), + ) + + +# A function to convert all nnunet models (configs and folds) to MONAI bundle format. +# The function iterates through all folds and configurations, converting each model to the specified bundle format. +# The number of folds, configurations, plans and dataset.json will be parsed from the nnunet folder +def convert_best_nnunet_to_monai_bundle( + nnunet_config: dict, bundle_root_folder: str, inference_info_file: str = "inference_information.json" +) -> None: + """ + Convert all nnUNet models (configs and folds) to MONAI bundle format. + + Parameters + ---------- + nnunet_config : dict + Configuration dictionary for nnUNet. Expected keys are: + - "dataset_name_or_id": str, name or ID of the dataset. 
+ - "nnunet_configuration": str, configuration name. + - "nnunet_trainer": str, optional, name of the nnU-Net trainer (default is "nnUNetTrainer"). + - "nnunet_plans": str, optional, name of the nnU-Net plans (default is "nnUNetPlans"). + bundle_root_folder : str + Path to the root folder of the MONAI bundle. + inference_info : str, optional + Path to the inference information file (default is "inference_information.json"). + + Returns + ------- + None + """ + from batchgenerators.utilities.file_and_folder_operations import subfiles + from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + dataset_name = nnunet_config["dataset_name_or_id"] + + inference_info_path = Path(os.environ["nnUNet_results"]).joinpath( + maybe_convert_to_dataset_name(dataset_name), inference_info_file + ) + + if not os.path.exists(inference_info_path): + raise FileNotFoundError(f"Inference information file not found: {inference_info_path}") + inference_info = load_json(inference_info_path) + + # Get the best model or ensemble from the inference information + if "best_model_or_ensemble" not in inference_info: + raise KeyError(f"Key 'best_model_or_ensemble' not found in inference information file: {inference_info_path}") + best_model_dict = inference_info["best_model_or_ensemble"] + + # Get the folds information + if "folds" not in inference_info: + raise KeyError(f"Key 'folds' not found in inference information file: {inference_info_path}") + folds = inference_info["folds"] # list of folds + + cascade_3d_fullres = False + for model_dict in best_model_dict["selected_model_or_models"]: + if model_dict["configuration"] == "3d_cascade_fullres": + cascade_3d_fullres = True + + print("Converting model: ", model_dict["configuration"]) + nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath( + maybe_convert_to_dataset_name(dataset_name), + f"{model_dict['trainer']}__{model_dict['plans_identifier']}__{model_dict['configuration']}", + ) + nnunet_config["nnunet_configuration"] = model_dict["configuration"] + nnunet_config["nnunet_trainer"] = model_dict["trainer"] + nnunet_config["nnunet_plans"] = model_dict["plans_identifier"] + + if not os.path.exists(nnunet_model_folder): + raise FileNotFoundError(f"Model folder not found: {nnunet_model_folder}") + + for fold in folds: + print("Converting fold: ", fold, " of model: ", model_dict["configuration"]) + convert_nnunet_to_monai_bundle(nnunet_config, bundle_root_folder, fold) + + # IF model is a cascade model, 3d_lowres is also needed + if cascade_3d_fullres: + # check if 3d_lowres is already in the bundle + if not os.path.exists(os.path.join(bundle_root_folder, "models", "3d_lowres")): + # copy the 3d_lowres model folder from nnunet results + nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath( + maybe_convert_to_dataset_name(dataset_name), + f"{model_dict['trainer']}__{model_dict['plans_identifier']}__3d_lowres", + ) + if not os.path.exists(nnunet_model_folder): + raise FileNotFoundError(f"Model folder not found: {nnunet_model_folder}") + # copy the 3d_lowres model folder to the bundle root folder + nnunet_config["nnunet_configuration"] = "3d_lowres" + nnunet_config["nnunet_trainer"] = best_model_dict["selected_model_or_models"][-1][ + "trainer" + ] # Using the same trainer as the cascade model + nnunet_config["nnunet_plans"] = best_model_dict["selected_model_or_models"][-1][ + "plans_identifier" + ] # Using the same plans id as the cascade model + for fold in folds: + print("Converting fold: ", fold, " of model: ", 
"3d_lowres") + convert_nnunet_to_monai_bundle(nnunet_config, bundle_root_folder, fold) + + # Finally if postprocessing is needed (for ensemble models) + if "postprocessing_file" in best_model_dict: + postprocessing_file_path = best_model_dict["postprocessing_file"] + if not os.path.exists(postprocessing_file_path): + raise FileNotFoundError(f"Postprocessing file not found: {postprocessing_file_path}") + shutil.copy(postprocessing_file_path, Path(bundle_root_folder).joinpath("models", "postprocessing.pkl")) + + +def convert_monai_bundle_to_nnunet(nnunet_config: dict, bundle_root_folder: str, fold: int = 0) -> None: + """ + Convert a MONAI bundle to nnU-Net format. + + Parameters + ---------- + nnunet_config : dict + Configuration dictionary for nnU-Net. Expected keys are: + - "dataset_name_or_id": str, name or ID of the dataset. + - "nnunet_trainer": str, optional, name of the nnU-Net trainer (default is "nnUNetTrainer"). + - "nnunet_plans": str, optional, name of the nnU-Net plans (default is "nnUNetPlans"). + bundle_root_folder : str + Path to the root folder of the MONAI bundle. + fold : int, optional + Fold number for cross-validation (default is 0). + + Returns + ------- + None + """ + from odict import odict + + nnunet_trainer: str = "nnUNetTrainer" + nnunet_plans: str = "nnUNetPlans" + + if "nnunet_trainer" in nnunet_config: + nnunet_trainer = nnunet_config["nnunet_trainer"] + + if "nnunet_plans" in nnunet_config: + nnunet_plans = nnunet_config["nnunet_plans"] + + from nnunetv2.training.logging.nnunet_logger import nnUNetLogger + from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + def subfiles( + folder: Union[str, Path], prefix: Optional[str] = None, suffix: Optional[str] = None, sort: bool = True + ) -> list[str]: + res = [ + i.name + for i in Path(folder).iterdir() + if i.is_file() + and (prefix is None or i.name.startswith(prefix)) + and (suffix is None or i.name.endswith(suffix)) + ] + if sort: + res.sort() + return res + + nnunet_model_folder: Path = Path(os.environ["nnUNet_results"]).joinpath( + maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"]), + f"{nnunet_trainer}__{nnunet_plans}__3d_fullres", + ) + + nnunet_preprocess_model_folder: Path = Path(os.environ["nnUNet_preprocessed"]).joinpath( + maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"]) + ) + + Path(nnunet_model_folder).joinpath(f"fold_{fold}").mkdir(parents=True, exist_ok=True) + + nnunet_checkpoint: dict = torch.load(f"{bundle_root_folder}/models/{NNUNET_CHECKPOINT_FILENAME}", weights_only=False) + latest_checkpoints: list[str] = subfiles( + Path(bundle_root_folder).joinpath("models", f"fold_{fold}"), prefix="checkpoint_epoch", sort=True + ) + epochs: list[int] = [] + for latest_checkpoint in latest_checkpoints: + epochs.append(int(latest_checkpoint[len("checkpoint_epoch=") : -len(".pt")])) + + epochs.sort() + final_epoch: int = epochs[-1] + monai_last_checkpoint: dict = torch.load( + f"{bundle_root_folder}/models/fold_{fold}/checkpoint_epoch={final_epoch}.pt", weights_only=False + ) + + best_checkpoints: list[str] = subfiles( + Path(bundle_root_folder).joinpath("models", f"fold_{fold}"), prefix="checkpoint_key_metric", sort=True + ) + key_metrics: list[str] = [] + for best_checkpoint in best_checkpoints: + key_metrics.append(str(best_checkpoint[len("checkpoint_key_metric=") : -len(".pt")])) + + key_metrics.sort() + best_key_metric: str = key_metrics[-1] + monai_best_checkpoint: dict = torch.load( + 
f"{bundle_root_folder}/models/fold_{fold}/checkpoint_key_metric={best_key_metric}.pt", weights_only=False + ) + + if "optimizer_state" in monai_last_checkpoint: + nnunet_checkpoint["optimizer_state"] = monai_last_checkpoint["optimizer_state"] + + nnunet_checkpoint["network_weights"] = odict() + + for key in monai_last_checkpoint["network_weights"]: + nnunet_checkpoint["network_weights"][key] = monai_last_checkpoint["network_weights"][key] + + nnunet_checkpoint["current_epoch"] = final_epoch + nnunet_checkpoint["logging"] = nnUNetLogger().get_checkpoint() + nnunet_checkpoint["_best_ema"] = 0 + nnunet_checkpoint["grad_scaler_state"] = None + + torch.save(nnunet_checkpoint, Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_final.pth")) + + nnunet_checkpoint["network_weights"] = odict() + + if "optimizer_state" in monai_last_checkpoint: + nnunet_checkpoint["optimizer_state"] = monai_best_checkpoint["optimizer_state"] + + for key in monai_best_checkpoint["network_weights"]: + nnunet_checkpoint["network_weights"][key] = monai_best_checkpoint["network_weights"][key] + + torch.save(nnunet_checkpoint, Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_best.pth")) + + if not os.path.exists(os.path.join(nnunet_model_folder, DATASET_JSON_FILENAME)): + shutil.copy(f"{bundle_root_folder}/models/jsonpkls/{DATASET_JSON_FILENAME}", nnunet_model_folder) + if not os.path.exists(os.path.join(nnunet_model_folder, PLANS_JSON_FILENAME)): + shutil.copy(f"{bundle_root_folder}/models/jsonpkls/{PLANS_JSON_FILENAME}", nnunet_model_folder) + if not os.path.exists(os.path.join(nnunet_model_folder, "dataset_fingerprint.json")): + shutil.copy(f"{nnunet_preprocess_model_folder}/dataset_fingerprint.json", nnunet_model_folder) + if not os.path.exists(os.path.join(nnunet_model_folder, NNUNET_CHECKPOINT_FILENAME)): + shutil.copy(f"{bundle_root_folder}/models/{NNUNET_CHECKPOINT_FILENAME}", nnunet_model_folder) + + +# This function loads a nnUNet network from the provided plans and dataset files. +# It initializes the network architecture and loads the model weights if a checkpoint is provided. +def get_network_from_nnunet_plans( + plans_file: str, + dataset_file: str, + configuration: str, + model_ckpt: Optional[str] = None, + model_key_in_ckpt: str = "model", +) -> Union[torch.nn.Module, Any]: + """ + Load and initialize a nnUNet network based on nnUNet plans and configuration. + + Parameters + ---------- + plans_file : str + Path to the JSON file containing the nnUNet plans. + dataset_file : str + Path to the JSON file containing the dataset information. + configuration : str + The configuration name to be used from the plans. + model_ckpt : Optional[str], optional + Path to the model checkpoint file. If None, the network is returned without loading weights (default is None). + model_key_in_ckpt : str, optional + The key in the checkpoint file that contains the model state dictionary (default is "model"). + + Returns + ------- + network : torch.nn.Module + The initialized neural network, with weights loaded if `model_ckpt` is provided. 
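+
+    Example::
+
+        # A minimal, illustrative sketch; the paths are assumptions following the bundle
+        # layout produced by convert_nnunet_to_monai_bundle in this module.
+        network = get_network_from_nnunet_plans(
+            plans_file="bundle_root/models/jsonpkls/plans.json",
+            dataset_file="bundle_root/models/jsonpkls/dataset.json",
+            configuration="3d_fullres",
+            model_ckpt="bundle_root/models/3d_fullres/fold_0/best_model.pt",
+            model_key_in_ckpt="network_weights",
+        )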
+ """ + from batchgenerators.utilities.file_and_folder_operations import load_json + from nnunetv2.utilities.get_network_from_plans import get_network_from_plans + from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels + from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + + plans = load_json(plans_file) + dataset_json = load_json(dataset_file) + + plans_manager = PlansManager(plans) + configuration_manager = plans_manager.get_configuration(configuration) + num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) + label_manager = plans_manager.get_label_manager(dataset_json) + + enable_deep_supervision = True + + network = get_network_from_plans( + configuration_manager.network_arch_class_name, + configuration_manager.network_arch_init_kwargs, + configuration_manager.network_arch_init_kwargs_req_import, + num_input_channels, + label_manager.num_segmentation_heads, + allow_init=True, + deep_supervision=enable_deep_supervision, + ) + + if model_ckpt is None: + return network + else: + state_dict = torch.load(model_ckpt, weights_only=False) + network.load_state_dict(state_dict[model_key_in_ckpt]) + return network + + +def get_nnunet_trainer( + dataset_name_or_id: Union[str, int], + configuration: str, + fold: Union[int, str], + trainer_class_name: str = "nnUNetTrainer", + plans_identifier: str = "nnUNetPlans", + use_compressed_data: bool = False, + continue_training: bool = False, + only_run_validation: bool = False, + disable_checkpointing: bool = False, + device: str = "cuda", + pretrained_model: Optional[str] = None, +) -> Any: # type: ignore + """ + Get the nnUNet trainer instance based on the provided configuration. + The returned nnUNet trainer can be used to initialize the SupervisedTrainer for training, including the network, + optimizer, loss function, DataLoader, etc. + + Example:: + + from monai.apps import SupervisedTrainer + from monai.bundle.nnunet import get_nnunet_trainer + + dataset_name_or_id = 'Task009_Spleen' + fold = 0 + configuration = '3d_fullres' + nnunet_trainer = get_nnunet_trainer(dataset_name_or_id, configuration, fold) + + trainer = SupervisedTrainer( + device=nnunet_trainer.device, + max_epochs=nnunet_trainer.num_epochs, + train_data_loader=nnunet_trainer.dataloader_train, + network=nnunet_trainer.network, + optimizer=nnunet_trainer.optimizer, + loss_function=nnunet_trainer.loss_function, + epoch_length=nnunet_trainer.num_iterations_per_epoch, + ) + + Parameters + ---------- + dataset_name_or_id : Union[str, int] + The name or ID of the dataset to be used. + configuration : str + The configuration name for the training. + fold : Union[int, str] + The fold number or 'all' for cross-validation. + trainer_class_name : str, optional + The class name of the trainer to be used. Default is 'nnUNetTrainer'. + For a complete list of supported trainers, check: + https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunetv2/training/nnUNetTrainer/variants + plans_identifier : str, optional + Identifier for the plans to be used. Default is 'nnUNetPlans'. + use_compressed_data : bool, optional + Whether to use compressed data. Default is False. + continue_training : bool, optional + Whether to continue training from a checkpoint. Default is False. + only_run_validation : bool, optional + Whether to only run validation. Default is False. + disable_checkpointing : bool, optional + Whether to disable checkpointing. Default is False. 
device : str, optional
+        The device to be used for training. Default is 'cuda'.
+    pretrained_model : Optional[str], optional
+        Path to the pretrained model file.
+
+    Returns
+    -------
+    nnunet_trainer : object
+        The nnUNet trainer instance.
+    """
+    # From nnUNet/nnunetv2/run/run_training.py#run_training
+    if isinstance(fold, str):
+        if fold != "all":
+            try:
+                fold = int(fold)
+            except ValueError as e:
+                print(
+                    f'Unable to convert given value for fold to int: {fold}. fold must be either "all" or an integer!'
+                )
+                raise e
+
+    from nnunetv2.run.run_training import get_trainer_from_args, maybe_load_checkpoint
+
+    nnunet_trainer = get_trainer_from_args(
+        str(dataset_name_or_id),
+        configuration,
+        fold,
+        trainer_class_name,
+        plans_identifier,
+        device=torch.device(device),
+    )
+    if disable_checkpointing:
+        nnunet_trainer.disable_checkpointing = disable_checkpointing
+
+    assert not (continue_training and only_run_validation), "Cannot set --c (continue_training) and --val (only_run_validation) at the same time."
+
+    maybe_load_checkpoint(nnunet_trainer, continue_training, only_run_validation)
+    nnunet_trainer.on_train_start()  # Added to initialize the trainer
+    if torch.cuda.is_available():
+        cudnn.deterministic = False
+        cudnn.benchmark = True
+
+    if pretrained_model is not None:
+        state_dict = torch.load(pretrained_model, weights_only=False)
+        if "network_weights" in state_dict:
+            nnunet_trainer.network._orig_mod.load_state_dict(state_dict["network_weights"])
+    return nnunet_trainer
+
+
+def get_nnunet_monai_predictor(
+    model_folder: Union[str, Path],
+    model_name: str = "model.pt",
+    dataset_json: Optional[dict] = None,
+    plans: Optional[dict] = None,
+    nnunet_config: Optional[dict] = None,
+    save_probabilities: bool = False,
+    save_files: bool = False,
+    use_folds: Optional[Union[int, str]] = None,
+) -> ModelnnUNetWrapper:
+    """
+    Initializes and returns a `ModelnnUNetWrapper` containing the corresponding `nnUNetPredictor`.
+    The model folder should contain the following files, created during training:
+
+    - dataset.json: from the nnUNet results folder
+    - plans.json: from the nnUNet results folder
+    - nnunet_checkpoint.pth: The nnUNet checkpoint file, containing the nnUNet training configuration
+    - model.pt: The checkpoint file containing the model weights.
+
+    The returned wrapper object can be used for inference with the MONAI framework:
+    Example::
+
+        from monai.bundle.nnunet import get_nnunet_monai_predictor
+
+        model_folder = 'path/to/monai_bundle/model'
+        model_name = 'model.pt'
+        wrapper = get_nnunet_monai_predictor(model_folder, model_name)
+
+        # Perform inference
+        input_data = ...
+        output = wrapper(input_data)
+
+
+    Parameters
+    ----------
+    model_folder : Union[str, Path]
+        The folder where the model is stored.
+    model_name : str, optional
+        The name of the model file, by default "model.pt".
+    dataset_json : dict, optional
+        The dataset JSON file containing dataset information.
+    plans : dict, optional
+        The plans JSON file containing model configuration.
+    nnunet_config : dict, optional
+        The nnUNet configuration dictionary containing model parameters.
+    save_probabilities : bool, optional
+        Whether the predictor also saves predicted probabilities as .npz files (used for ensembling), by default False.
+    save_files : bool, optional
+        Whether prediction outputs are saved to files in the wrapper's temporary folder, by default False.
+    use_folds : Optional[Union[int, str]], optional
+        The fold(s) to load; when None, available folds are auto-detected from the model folder.
+
+    Returns
+    -------
+    ModelnnUNetWrapper
+        A wrapper object that contains the nnUNetPredictor and the loaded model.
+ """ + + from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor + + predictor = nnUNetPredictor( + tile_step_size=0.5, + use_gaussian=True, + use_mirroring=True, + device=torch.device("cuda", 0), + verbose=True, + verbose_preprocessing=False, + allow_tqdm=True, + ) + # initializes the network architecture, loads the checkpoint + print("nnunet_predictor: Model Folder: ", model_folder) + print("nnunet_predictor: Model name: ", model_name) + print("nnunet_predictor: use_folds: ", use_folds) + wrapper = ModelnnUNetWrapper( + predictor, + model_folder=model_folder, + checkpoint_name=model_name, + dataset_json=dataset_json, + plans=plans, + nnunet_config=nnunet_config, + save_probabilities=save_probabilities, + save_files=save_files, + use_folds=use_folds, + ) + return wrapper + + +def get_nnunet_monai_predictors_for_ensemble( + model_list: list, + model_path: Union[str, Path], + model_name: str = "model.pt", + use_folds: Optional[Union[int, str]] = None, +) -> Tuple[ModelnnUNetWrapper, ...]: + network_list = [] + for model_config in model_list: + model_folder = Path(model_path).joinpath(model_config) + network_list.append( + get_nnunet_monai_predictor( + model_folder=model_folder, + model_name=model_name, + save_probabilities=True, + save_files=True, + use_folds=use_folds, + ) + ) + return tuple(network_list) + + +import os +from typing import Dict, List, Union + +import numpy as np +from nnunetv2.ensembling.ensemble import average_probabilities +from nnunetv2.utilities.label_handling.label_handling import LabelManager +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + +from monai.config import KeysCollection +from monai.data.meta_tensor import MetaTensor +from monai.transforms import MapTransform + + +class EnsembleProbabilitiesToSegmentation(MapTransform): + """ + MONAI transform that loads .npz probability files from metadata['saved_file'] for a given key, + averages them, and converts to final segmentation using nnU-Net's LabelManager. + Returns a MetaTensor segmentation result (instead of saving to disk). 
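+
+    Example::
+
+        # A minimal, illustrative sketch; the paths and key are assumptions following the
+        # bundle layout used elsewhere in this module. Upstream predictors must have been
+        # run with save_probabilities=True so that meta["saved_file"] points at .npz files.
+        ensemble = EnsembleProbabilitiesToSegmentation(
+            keys=["pred"],
+            dataset_json_path="bundle_root/models/jsonpkls/dataset.json",
+            plans_json_path="bundle_root/models/jsonpkls/plans.json",
+            output_key="pred",
+        )
+        data = ensemble(data)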
+    """
+
+    def __init__(
+        self,
+        keys: KeysCollection,
+        dataset_json_path: str,
+        plans_json_path: str,
+        allow_missing_keys: bool = False,
+        output_key: str = "pred",
+    ):
+        super().__init__(keys, allow_missing_keys)
+
+        # Load required nnU-Net configs
+        self.plans_manager = PlansManager(plans_json_path)
+        self.dataset_json = self._load_json(dataset_json_path)
+        self.label_manager = self.plans_manager.get_label_manager(self.dataset_json)
+        self.output_key = output_key
+
+    def _load_json(self, path: str) -> Dict:
+        import json
+
+        with open(path, "r") as f:
+            return json.load(f)
+
+    def __call__(self, data: Dict) -> Dict:
+        d = dict(data)
+        all_files = []
+        for key in self.keys:
+            meta = d[key].meta if isinstance(d[key], MetaTensor) else d.get("meta", {})
+            saved_file = meta.get("saved_file", None)
+
+            # Support multiple files for ensemble
+            if isinstance(saved_file, str):
+                saved_file = [saved_file]
+            elif not isinstance(saved_file, list):
+                raise ValueError(f"'saved_file' in meta must be str or List[str], got {type(saved_file)}")
+
+            for f in saved_file:
+                if not os.path.exists(f):
+                    raise FileNotFoundError(f"Probability file not found: {f}")
+                all_files.append(f)
+
+        print("All files to average: ", all_files)
+        # Step 1: average probabilities
+        avg_probs = average_probabilities(all_files)
+
+        # Step 2: convert to segmentation
+        segmentation = self.label_manager.convert_logits_to_segmentation(avg_probs)  # shape: (H, W, D)
+
+        # Step 3: wrap as MetaTensor and attach meta
+        seg_tensor = MetaTensor(segmentation[None].astype(np.uint8))  # add channel dim
+        seg_tensor.meta = dict(meta)
+
+        # Replace the key or store in new key
+        d[self.output_key] = seg_tensor
+        return d
+
+
+class ModelnnUNetWrapper(torch.nn.Module):
+    """
+    A wrapper class for nnUNet model integration with the MONAI framework.
+    The wrapper can be used to integrate an nnUNet bundle within the MONAI framework for inference.
+
+    Parameters
+    ----------
+    predictor : nnUNetPredictor
+        The nnUNet predictor object used for inference.
+    model_folder : Union[str, Path]
+        The folder path where the model and related files are stored.
+    checkpoint_name : str
+        The name of the model checkpoint file, e.g. "model.pt".
+    dataset_json : dict, optional
+        The dataset JSON file containing dataset information.
+    plans : dict, optional
+        The plans JSON file containing model configuration.
+    nnunet_config : dict, optional
+        The nnUNet configuration dictionary containing model parameters.
+    save_probabilities : bool, optional
+        Whether predicted probabilities are saved as .npz files (used for ensembling), by default False.
+    save_files : bool, optional
+        Whether prediction outputs are saved under `tmp_dir`, by default False.
+    tmp_dir : str, optional
+        The folder for temporary prediction outputs, by default "tmp".
+    use_folds : int, str, or a sequence of these, optional
+        The fold(s) to load; when None, available folds are auto-detected from the model folder.
+
+    Attributes
+    ----------
+    predictor : nnUNetPredictor
+        The nnUNet predictor object used for inference.
+    network_weights : torch.nn.Module
+        The network weights of the model.
+
+    Notes
+    -----
+    This class integrates an nnUNet model with the MONAI framework by loading the necessary configurations,
+    restoring the network architecture, and setting up the predictor for inference.
+    """
+
+    def __init__(
+        self,
+        predictor: object,
+        model_folder: Union[str, Path],
+        checkpoint_name: Optional[str] = None,
+        dataset_json: Optional[dict] = None,
+        plans: Optional[dict] = None,
+        nnunet_config: Optional[dict] = None,
+        save_probabilities: bool = False,
+        save_files: bool = False,
+        tmp_dir: str = "tmp",
+        use_folds: Optional[Union[int, str, Tuple[Union[int, str], ...], List[Union[int, str]]]] = None,
+    ):
+
+        super().__init__()
+        self.predictor = predictor
+
+        if not checkpoint_name:
+            raise ValueError("Model name is required. 
Please provide a valid model name.") + + self.tmp_dir = tmp_dir + self.save_probabilities = save_probabilities + self.save_files = save_files + + # Set up model paths + model_training_output_dir = model_folder + model_parent_dir = Path(model_training_output_dir).parent + + # Import required modules + from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + + # Load dataset and plans if not provided + if dataset_json is None: + dataset_json = load_json(join(Path(model_parent_dir), "jsonpkls", DATASET_JSON_FILENAME)) + if plans is None: + plans = load_json(join(Path(model_parent_dir), "jsonpkls", PLANS_JSON_FILENAME)) + + plans_manager = PlansManager(plans) + parameters = [] + + # Get configuration from nnunet_checkpoint.pth or provided config + if nnunet_config is None: + checkpoint_path = join(Path(model_training_output_dir), NNUNET_CHECKPOINT_FILENAME) + if not os.path.exists(checkpoint_path): + raise ValueError( + f"Checkpoint file not found at {checkpoint_path}. Please ensure the model is trained and the checkpoint exists." + ) + + checkpoint = torch.load(checkpoint_path, weights_only=False, map_location=torch.device("cpu")) + trainer_name = checkpoint["trainer_name"] + configuration_name = checkpoint["init_args"]["configuration"] + inference_allowed_mirroring_axes = ( + checkpoint["inference_allowed_mirroring_axes"] + if "inference_allowed_mirroring_axes" in checkpoint.keys() + else None + ) + else: + trainer_name = nnunet_config["trainer_name"] + configuration_name = nnunet_config["configuration"] + inference_allowed_mirroring_axes = nnunet_config["inference_allowed_mirroring_axes"] + + # Store configuration name + self.configuration_name = configuration_name + + # Handle folds + if isinstance(use_folds, str) or isinstance(use_folds, int): + use_folds = [use_folds] + + if use_folds is None: + use_folds = self.predictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) + + # Load model parameters from each fold + for f in use_folds: + f = int(f) if f != "all" else f + fold_checkpoint_path = join(model_training_output_dir, f"fold_{f}", checkpoint_name) + monai_checkpoint = torch.load(fold_checkpoint_path, map_location=torch.device("cpu"), weights_only=False) + + if "network_weights" in monai_checkpoint.keys(): + parameters.append(monai_checkpoint["network_weights"]) + else: + parameters.append(monai_checkpoint) + + # Get configuration manager and setup network + configuration_manager = plans_manager.get_configuration(configuration_name) + + # Import required nnUNet modules + import nnunetv2 + from nnunetv2.utilities.find_class_by_name import recursive_find_python_class + from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels + + # Determine input channels and find trainer class + num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) + trainer_class = recursive_find_python_class( + join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), trainer_name, "nnunetv2.training.nnUNetTrainer" + ) + + if trainer_class is None: + raise RuntimeError(f"Unable to locate trainer class {trainer_name} in nnunetv2.training.nnUNetTrainer.") + + # Build network architecture + network = trainer_class.build_network_architecture( + configuration_manager.network_arch_class_name, + configuration_manager.network_arch_init_kwargs, + configuration_manager.network_arch_init_kwargs_req_import, + num_input_channels, + plans_manager.get_label_manager(dataset_json).num_segmentation_heads, + 
enable_deep_supervision=False, + ) + + # Configure predictor with all required settings + predictor.plans_manager = plans_manager + predictor.configuration_manager = configuration_manager + predictor.list_of_parameters = parameters + predictor.network = network + predictor.dataset_json = dataset_json + predictor.trainer_name = trainer_name + predictor.allowed_mirroring_axes = inference_allowed_mirroring_axes + predictor.label_manager = plans_manager.get_label_manager(dataset_json) + + # Store network weights reference + self.network_weights = self.predictor.network + + def forward(self, x: MetaTensor) -> MetaTensor: + """ + Forward pass for the nnUNet model. + + Args: + x (MetaTensor): Input tensor for inference. + + Returns: + MetaTensor: The output tensor with the same metadata as the input. + + Raises: + TypeError: If the input is not a MetaTensor. + """ + if not isinstance(x, MetaTensor): + raise TypeError("Input must be a MetaTensor.") + + # Extract spatial shape from input + spatial_shape = list(x.shape[-3:]) # [H, W, D] or [X, Y, Z] + + # Get spacing information from metadata + properties_or_list_of_properties = {} + + if "pixdim" in x.meta: + # Get spacing from pixdim + if x.meta["pixdim"].ndim == 1: + properties_or_list_of_properties["spacing"] = x.meta["pixdim"][1:4].tolist() + else: + properties_or_list_of_properties["spacing"] = x.meta["pixdim"][0][1:4].numpy().tolist() + + elif "affine" in x.meta: + # Get spacing from affine matrix + affine = x.meta["affine"][0].cpu().numpy() if x.meta["affine"].ndim == 3 else x.meta["affine"].cpu().numpy() + spacing = np.array( + [ + np.sqrt(np.sum(affine[:3, 0] ** 2)), + np.sqrt(np.sum(affine[:3, 1] ** 2)), + np.sqrt(np.sum(affine[:3, 2] ** 2)), + ] + ) + properties_or_list_of_properties["spacing"] = spacing + else: + # Default spacing if no metadata available + properties_or_list_of_properties["spacing"] = [1.0, 1.0, 1.0] + + # Add spatial shape to properties + properties_or_list_of_properties["spatial_shape"] = spatial_shape + + # Convert input tensor to numpy array + image_or_list_of_images = x.cpu().numpy()[0, :] + + # Setup output file path if saving enabled + outfile = None + if self.save_files: + # Get original filename from metadata + infile = x.meta["filename_or_obj"] + if isinstance(infile, list): + infile = infile[0] + + # Create output path + outfile_name = os.path.basename(infile).split(".")[0] + outfolder = Path(self.tmp_dir).joinpath(self.configuration_name) + os.makedirs(outfolder, exist_ok=True) + outfile = str(Path(outfolder).joinpath(outfile_name)) + + # Extract 4x4 affine matrix for SimpleITK compatibility + if "affine" in x.meta: + # Get affine matrix with proper shape + if x.meta["affine"].shape == (1, 4, 4): + affine = x.meta["affine"][0].cpu().numpy() + elif x.meta["affine"].shape == (4, 4): + affine = x.meta["affine"].cpu().numpy() + else: + raise ValueError(f"Unexpected affine shape: {x.meta['affine'].shape}") + + # Calculate spacing, origin and direction + spacing = tuple(np.linalg.norm(affine[:3, i]) for i in range(3)) + origin = tuple(float(v) for v in affine[:3, 3]) + direction_matrix = affine[:3, :3] / spacing + direction = tuple(direction_matrix.flatten().round(6)) + + # Add to properties dict for SimpleITK + properties_or_list_of_properties["sitk_stuff"] = { + "spacing": spacing, + "origin": origin, + "direction": direction, + } + # Handle cascade models by loading segmentation from previous stage + previous_segmentation = None + if self.configuration_name == "3d_cascade_fullres": + # For cascade models, we 
need the lowres prediction + lowres_predictions_folder = os.path.join(self.tmp_dir, "3d_lowres") + + if outfile: + seg_file = os.path.join(lowres_predictions_folder, outfile_name + ".nii.gz") + # Load the lowres segmentation from file + rw = self.predictor.plans_manager.image_reader_writer_class() + previous_segmentation, _ = rw.read_seg(seg_file) + + if previous_segmentation is None: + raise ValueError("Failed to load previous segmentation for cascade model.") + else: + raise ValueError("Output file name is required for 3d_cascade_fullres configuration.") + + # Run prediction using nnUNet predictor + prediction_output = self.predictor.predict_from_list_of_npy_arrays( + image_or_list_of_images, + previous_segmentation, + properties_or_list_of_properties, + save_probabilities=self.save_probabilities, + truncated_ofname=outfile, + num_processes=2, + num_processes_segmentation_export=2, + ) + + # Process prediction output based on save_files setting + if not self.save_files: + # Return the prediction output directly + out_tensors = [] + for out in prediction_output: + # Add batch and channel dimensions + out_tensors.append(torch.from_numpy(np.expand_dims(np.expand_dims(out, 0), 0))) + # Concatenate along batch dimension + out_tensor = torch.cat(out_tensors, 0) + + return MetaTensor(out_tensor, meta=x.meta) + else: + # Return a placeholder tensor with file path in metadata + saved_path = outfile + ".npz" + if not os.path.exists(saved_path): + raise FileNotFoundError(f"Expected saved file not found: {saved_path}") + + # Create placeholder tensor with same spatial dimensions + shape = properties_or_list_of_properties["spatial_shape"] + dummy_tensor = torch.zeros((1, 1, *shape), dtype=torch.float32) + + # Create metadata with file path + meta_with_filepath = dict(x.meta) + meta_with_filepath["saved_file"] = saved_path + + return MetaTensor(dummy_tensor, meta=meta_with_filepath) diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py new file mode 100644 index 00000000..ad52fb08 --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py @@ -0,0 +1,426 @@ +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
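+
+# Hedged usage sketch (orientation only, not part of the pipeline): this operator is
+# typically instantiated inside a MONAI Deploy Application's compose() and wired
+# between the DICOM series-to-volume operator and the DICOM writers, e.g.:
+#
+#   seg_op = NNUnetSegOperator(
+#       self,
+#       app_context=app_context,
+#       model_path=Path(app_context.model_path),    # bundle root from convert_nnunet_ckpts.py
+#       output_folder=Path(app_context.output_path),
+#       name="nnunet_seg_op",  # "name" kwarg assumed to be handled by the Operator base class
+#   )
+#
+# Parameter names mirror NNUnetSegOperator.__init__ below; the surrounding app wiring
+# is an assumption based on typical MONAI Deploy app structure.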
+ +import logging +from pathlib import Path +from typing import Dict, List + +import torch +from numpy import int16, uint8 + +# Import custom transforms +from post_transforms import CalculateVolumeFromMaskd, ExtractVolumeToTextd, LabelToContourd, OverlayImageLabeld + +# Import from MONAI deploy +from monai.deploy.utils.importutil import optional_import + +Dataset, _ = optional_import("monai.data", name="Dataset") +DataLoader, _ = optional_import("monai.data", name="DataLoader") +import os + +# Try importing from local version first, then fall back to MONAI if not available +# This approach works regardless of how the file is imported (as module or script) +import sys + +# Add current directory to path to ensure the local module is found +current_dir = os.path.dirname(os.path.abspath(__file__)) +if current_dir not in sys.path: + sys.path.insert(0, current_dir) + +try: + # Try local version first + from nnunet_bundle import EnsembleProbabilitiesToSegmentation, get_nnunet_monai_predictors_for_ensemble +except ImportError: + # Fall back to MONAI version if local version fails + from monai.apps.nnunet.nnunet_bundle import ( + get_nnunet_monai_predictors_for_ensemble, + EnsembleProbabilitiesToSegmentation, + ) + +from monai.deploy.core import AppContext, Fragment, Model, Operator, OperatorSpec +from monai.deploy.operators.monai_seg_inference_operator import InMemImageReader + +# Import MONAI transforms +from monai.transforms import Compose, KeepLargestConnectedComponentd, Lambdad, LoadImaged, SaveImaged, Transposed + + +class NNUnetSegOperator(Operator): + """ + Operator that performs segmentation inference with nnU-Net ensemble models. + + This operator loads and runs multiple nnU-Net models in an ensemble fashion, + processes the results, and outputs segmentation masks, volume measurements, + and visualization overlays. + """ + + def __init__( + self, + fragment: Fragment, + *args, + app_context: AppContext, + model_path: Path, + output_folder: Path = Path.cwd() / "output", + output_labels: List[int] = None, + model_list: List[str] = None, + model_name: str = "best_model.pt", + save_probabilities: bool = False, + save_files: bool = False, + **kwargs, + ): + """ + Initialize the nnU-Net segmentation operator. 
+ + Args: + fragment: The fragment this operator belongs to + app_context: The application context + model_path: Path to the nnU-Net model directory + output_folder: Directory to save output files + output_labels: List of label indices to include in outputs + model_list: List of nnU-Net model types to use in ensemble + model_name: Name of the model checkpoint file + save_probabilities: Whether to save probability maps + save_files: Whether to save intermediate files + """ + # Initialize logger + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + + # Set up data keys + self._input_dataset_key = "image" + self._pred_dataset_key = "pred" + + # Model configuration + self.model_path = self._find_model_file_path(model_path) + self.model_list = model_list or ["3d_fullres", "3d_lowres", "3d_cascade_fullres"] + self.model_name = model_name + self.save_probabilities = save_probabilities + self.save_files = save_files + self.prediction_keys = [f"pred_{model}" for model in self.model_list] + + # Output configuration + self.output_folder = output_folder + self.output_folder.mkdir(parents=True, exist_ok=True) + self.output_labels = output_labels if output_labels is not None else [1] + + # Store app context + self.app_context = app_context + + # I/O names for operator + self.input_name_image = "image" + self.output_name_seg = "seg_image" + self.output_name_text = "result_text" + self.output_name_sc_path = "dicom_sc_dir" + + # Call parent constructor + super().__init__(fragment, *args, **kwargs) + + def _find_model_file_path(self, model_path: Path) -> Path: + """ + Validates and returns the model directory path. + + Args: + model_path: Path to the model directory + + Returns: + Validated Path object to the model directory + + Raises: + ValueError: If model_path is invalid or doesn't exist + """ + # When executing as MAP, model_path is typically a directory (/opt/holoscan/models) + # nnU-Net expects a directory structure with model subdirectories + if not model_path: + raise ValueError("Model path not provided") + + if not model_path.is_dir(): + raise ValueError(f"Model path should be a directory, got: {model_path}") + + return model_path + + def _load_nnunet_models(self): + """ + Loads nnU-Net ensemble models using MONAI's nnU-Net bundle functionality + and registers them in the app_context. 
+ + Raises: + RuntimeError: If model loading fails + """ + # Determine device based on availability + _device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self._logger.info(f"Loading nnU-Net ensemble models from: {self.model_path} on {_device}") + + try: + # Get nnU-Net ensemble predictors (returns tuple of ModelnnUNetWrapper objects) + network_def = get_nnunet_monai_predictors_for_ensemble( + model_list=self.model_list, model_path=str(self.model_path), model_name=self.model_name + ) + + # Move models to device and set to evaluation mode + ensemble_predictors = [] + for predictor in network_def: + predictor.to(_device) + predictor.eval() + ensemble_predictors.append(predictor) + + # Create a MONAI Model object to encapsulate the ensemble + loaded_model = Model(self.model_path, name="nnunet_ensemble") + loaded_model.predictor = ensemble_predictors + + # Register the loaded Model object in the application context + self.app_context.models = loaded_model + + self._logger.info(f"Successfully loaded {len(ensemble_predictors)} nnU-Net models: {self.model_list}") + + except Exception as e: + self._logger.error(f"Failed to load nnU-Net models: {str(e)}") + raise + + def setup(self, spec: OperatorSpec): + """ + Sets up the operator by configuring input and output specifications. + + Args: + spec: The operator specification to configure + """ + # Define input - expects a DICOM image + spec.input(self.input_name_image) + + # Define outputs: + # 1. Segmentation output (for DICOM SEG) + spec.output(self.output_name_seg) + + # 2. Measurement results text (for DICOM SR) + spec.output(self.output_name_text) + + # 3. Directory path for visualization overlays (for DICOM SC) + spec.output(self.output_name_sc_path) + + def _convert_dicom_metadata_datatype(self, metadata: Dict) -> Dict: + """ + Converts pydicom-specific metadata types to corresponding native Python types. + + This addresses an issue with pydicom types in metadata for images converted from DICOM series. + Reference issue: https://github.com/Project-MONAI/monai-deploy-app-sdk/issues/185 + + Args: + metadata: Dictionary containing image metadata + + Returns: + Dictionary with converted metadata types + """ + if not metadata: + return metadata + + # Convert known metadata attributes to appropriate Python types + known_conversions = {"SeriesInstanceUID": str, "row_pixel_spacing": float, "col_pixel_spacing": float} + + for key, conversion_func in known_conversions.items(): + if key in metadata: + try: + metadata[key] = conversion_func(metadata[key]) + except Exception: + self._logger.warning(f"Failed to convert {key} to {conversion_func.__name__}") + + # Log converted metadata at debug level + if self._logger.isEnabledFor(logging.DEBUG): + self._logger.debug("Converted Image object metadata:") + for k, v in metadata.items(): + self._logger.debug(f"{k}: {v}, type {type(v)}") + + return metadata + + def compute(self, op_input, op_output, context): + """ + Main compute method that processes input, runs inference, and emits outputs. 
+ """ + # Get input image + input_image = op_input.receive(self.input_name_image) + if not input_image: + raise ValueError("Input image is not found.") + + # Load nnU-Net ensemble models + self._logger.info("Loading nnU-Net ensemble models") + self._load_nnunet_models() + + # Perform inference using our custom implementation + data_dict = self.compute_impl(input_image, context)[0] + + # Squeeze the batch dimension + data_dict[self._pred_dataset_key] = data_dict[self._pred_dataset_key].squeeze(0) + data_dict[self._input_dataset_key] = data_dict[self._input_dataset_key].squeeze(0) + + # Squeeze the batch dimension of affine meta data + data_dict[self._pred_dataset_key].affine = data_dict[self._pred_dataset_key].affine.squeeze(0) + data_dict[self._input_dataset_key].affine = data_dict[self._input_dataset_key].affine.squeeze(0) + + # Log shape information + self._logger.info(f"Segmentation prediction shape: {data_dict[self._pred_dataset_key].shape}") + self._logger.info(f"Segmentation image shape: {data_dict[self._input_dataset_key].shape}") + + # Get post transforms for MAP outputs + post_transforms = self.post_process_stage2() + + # Apply postprocessing transforms for MAP outputs + data_dict = post_transforms(data_dict) + + self._logger.info( + f"Segmentation prediction shape after post processing: {data_dict[self._pred_dataset_key].shape}" + ) + + # DICOM SEG output + op_output.emit(data_dict[self._pred_dataset_key].squeeze(0).numpy().astype(uint8), self.output_name_seg) + + # DICOM SR output - extract result text + result_text = self.get_result_text_from_transforms(post_transforms) + if not result_text: + raise ValueError("Result text could not be generated.") + + self._logger.info(f"Calculated Organ Volumes: {result_text}") + op_output.emit(result_text, self.output_name_text) + + # DICOM SC output + dicom_sc_dir = self.output_folder / "temp" + self._logger.info(f"Temporary DICOM SC saved at: {dicom_sc_dir}") + op_output.emit(dicom_sc_dir, self.output_name_sc_path) + + def pre_process(self, img_reader) -> Compose: + """Composes transforms for preprocessing the input image before predicting on nnU-Net models.""" + my_key = self._input_dataset_key + + return Compose( + [ + LoadImaged(keys=my_key, reader=img_reader, ensure_channel_first=True), + Transposed(keys=my_key, indices=[0, 3, 2, 1]), + ] + ) + + def compute_impl(self, input_image, context) -> List[Dict]: + """ + Performs the actual nnU-Net ensemble inference using ModelnnUNetWrapper. + This function handles the complete inference pipeline including preprocessing, + ensemble prediction, and postprocessing. + """ + + if not input_image: + raise ValueError("Input is None.") + + # Need to try to convert the data type of a few metadata attributes. + # input_img_metadata = self._convert_dicom_metadata_datatype(input_image.metadata()) + # Need to give a name to the image as in-mem Image obj has no name. + img_name = "Img_in_context" + + # This operator gets an in-memory Image object, so a specialized ImageReader is needed. 
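+        # (InMemImageReader adapts the in-memory Image object to MONAI's ImageReader
+        # interface, so the LoadImaged transform below can consume it like a file-based
+        # image while preserving the DICOM-derived metadata.)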
+ _reader = InMemImageReader(input_image) + + # Apply preprocessing transforms + pre_transforms = self.pre_process(_reader) + + # Create data dictionary + data_dict = {self._input_dataset_key: img_name} + + # Create dataset and dataloader + dataset = Dataset(data=[data_dict], transform=pre_transforms) + dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0) + + out_dict = [] + for d in dataloader: + preprocessed_image = d[self._input_dataset_key] + self._logger.info(f"Input shape: {preprocessed_image.shape}") + + # Get the loaded ensemble models from app context + if not hasattr(self.app_context, "models") or self.app_context.models is None: + raise RuntimeError("nnU-Net models not loaded. Call _load_nnunet_models first.") + + ensemble_predictors = self.app_context.models.predictor + + # Perform ensemble inference + self._logger.info("Running nnU-Net ensemble inference...") + + for i, predictor in enumerate(ensemble_predictors): + model_key = self.prediction_keys[i] + self._logger.info(f"Running inference with model: {model_key}") + + # Run inference with individual model + prediction = predictor(preprocessed_image) + d[model_key] = prediction + + self._logger.info("Inference complete, applying postprocessing...") + + # Apply postprocessing transforms (includes ensemble combination) + post_transforms1 = self.post_process_stage1() + d = post_transforms1(d) + out_dict.append(d) + return out_dict + + def post_process_stage1(self) -> Compose: + """Composes transforms for postprocessing the nnU-Net prediction results.""" + pred_key = self._pred_dataset_key + return Compose( + [ + # nnU-Net ensemble post-processing + EnsembleProbabilitiesToSegmentation( + keys=self.prediction_keys, + dataset_json_path=str(self.model_path / "jsonpkls/dataset.json"), + plans_json_path=str(self.model_path / "jsonpkls/plans.json"), + output_key=pred_key, + ), + # Add batch dimension to final prediction + Lambdad(keys=[pred_key], func=lambda x: x.unsqueeze(0)), + # Transpose dimensions back to original format + Transposed(keys=[self._input_dataset_key, pred_key], indices=(0, 1, 4, 3, 2)), + ] + ) + + def post_process_stage2(self) -> Compose: + """Composes transforms for postprocessing MAP outputs""" + pred_key = self._pred_dataset_key + + # Define labels for the segmentation output + labels = {"background": 0, "airway": 1} + + return Compose( + [ + # Keep only largest connected component for each label + KeepLargestConnectedComponentd(keys=pred_key, applied_labels=[1]), + # Calculate volume from segmentation mask + CalculateVolumeFromMaskd(keys=pred_key, label_names=labels), + # Extract volume data to text format + ExtractVolumeToTextd( + keys=[pred_key + "_volumes"], label_names=labels, output_labels=self.output_labels + ), + # Convert labels to contours + LabelToContourd(keys=pred_key, output_labels=self.output_labels), + # Create overlay of image and contours + OverlayImageLabeld(image_key=self._input_dataset_key, label_key=pred_key, overlay_key="overlay"), + # Save overlays as DICOM SC + SaveImaged( + keys="overlay", + output_ext=".dcm", + output_dir=self.output_folder / "temp", + separate_folder=False, + output_dtype=int16, + ), + ] + ) + + def get_result_text_from_transforms(self, post_transforms: Compose) -> str: + """ + Extracts result_text from the ExtractVolumeToTextd transform in the transform pipeline. 
+ + Args: + post_transforms: Composed transforms that include ExtractVolumeToTextd + + Returns: + The extracted result text or empty string if not found + """ + for transform in post_transforms.transforms: + if isinstance(transform, ExtractVolumeToTextd): + return transform.result_text + return "" diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/post_transforms.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/post_transforms.py new file mode 100644 index 00000000..813a508b --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/post_transforms.py @@ -0,0 +1,390 @@ +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import logging +import os +from typing import List, Optional + +import matplotlib.cm as cm +import numpy as np + +from monai.config import KeysCollection +from monai.data import MetaTensor +from monai.transforms import LabelToContour, MapTransform + + +# Calculate segmentation volumes in ml +class CalculateVolumeFromMaskd(MapTransform): + """ + Dictionary-based transform to calculate the volume of predicted organ masks. + + Args: + keys (list): The keys corresponding to the predicted organ masks in the dictionary. + label_names (list): The list of organ names corresponding to the masks. 
+ """ + + def __init__(self, keys, label_names): + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(keys) + self.label_names = label_names + + def __call__(self, data): + # Initialize a dictionary to store the volumes of each organ + pred_volumes = {} + + for key in self.keys: + for label_name in self.label_names.keys(): + # self._logger.info('Key: ', key, ' organ_name: ', label_name) + if label_name != "background": + # Get the predicted mask from the dictionary + pred_mask = data[key] + # Calculate the voxel size in cubic millimeters (voxel size should be in the metadata) + # Assuming the metadata contains 'spatial_shape' with voxel dimensions in mm + if hasattr(pred_mask, "affine"): + # voxel size + # Ensure the affine matrix is collapsed to shape (4, 4) + affine_matrix = np.squeeze(pred_mask.affine) + if affine_matrix.shape != (4, 4): + raise ValueError(f"Affine matrix must have shape (4, 4), but got {affine_matrix.shape}") + + # Calculate voxel size + voxel_size = np.abs(np.linalg.det(affine_matrix[:3, :3])) + # print(f"Voxel Size (mm³): {voxel_size}") + else: + raise ValueError("Affine transformation matrix with voxel spacing information is required.") + + # Calculate the volume in cubic millimeters + label_volume_mm3 = np.sum(pred_mask == self.label_names[label_name]) * voxel_size + + # Convert to milliliters (1 ml = 1000 mm^3) + label_volume_ml = label_volume_mm3 / 1000.0 + + # Store the result in the pred_volumes dictionary + # convert to int - radiologists prefer whole number with no decimals + pred_volumes[label_name] = int(round(label_volume_ml, 2)) + + # Add the calculated volumes to the data dictionary + key_name = key + "_volumes" + + data[key_name] = pred_volumes + # self._logger.info('pred_volumes: ', pred_volumes) + return data + + +class LabelToContourd(MapTransform): + def __init__(self, keys: KeysCollection, output_labels: list, allow_missing_keys: bool = False): + + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(keys, allow_missing_keys) + + self.output_labels = output_labels + + def __call__(self, data): + d = dict(data) + for key in self.keys: + label_image = d[key] + assert isinstance(label_image, MetaTensor), "Input image must be a MetaTensor." 
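+            # (A MetaTensor input is required so the original metadata can be re-attached
+            # to the contour output via MetaTensor(contour_image, meta=label_image.meta) below.)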
+ + # Initialize the contour image with the same shape as the label image + contour_image = np.zeros_like(label_image.cpu().numpy()) + + if label_image.ndim == 4: # Check if the label image is 4D with a channel dimension + # Process each 2D slice independently along the last axis (z-axis) + for i in range(label_image.shape[-1]): + slice_image = label_image[:, :, :, i].cpu().numpy() + + # Extract unique labels excluding background (assumed to be 0) + unique_labels = np.unique(slice_image) + unique_labels = unique_labels[unique_labels != 0] + + slice_contour = np.zeros_like(slice_image) + + # Generate contours for each label in the slice + for label in unique_labels: + # skip contour generation for labels that are not in output_labels + if label not in self.output_labels: + continue + + # Create a binary mask for the current label + binary_mask = np.zeros_like(slice_image) + binary_mask[slice_image == label] = 1.0 + + # Apply LabelToContour to the 2D slice (replace this with actual contour logic) + binary_mask = binary_mask.astype(np.float32) # Convert to float32 for LabelToContour + thick_edges = LabelToContour()(binary_mask) + + # Convert the edges back to binary mask + thick_edges = (thick_edges > 0).astype(np.uint8) + + # Assign the label value to the contour image at the edge positions + slice_contour[thick_edges > 0] = label + + # Stack the processed slice back into the 4D contour image + contour_image[:, :, :, i] = slice_contour + else: + # If the label image is not 4D, process it directly + slice_image = label_image.cpu().numpy() + unique_labels = np.unique(slice_image) + unique_labels = unique_labels[unique_labels != 0] + + for label in unique_labels: + binary_mask = np.zeros_like(slice_image) + binary_mask[slice_image == label] = 1.0 + + thick_edges = LabelToContour()(binary_mask) + contour_image[thick_edges > 0] = label + + # Convert the contour image back to a MetaTensor with the original metadata + contour_image_meta = MetaTensor(contour_image, meta=label_image.meta) # , affine=label_image.affine) + + # Store the contour MetaTensor in the output dictionary + d[key] = contour_image_meta + + return d + + +class OverlayImageLabeld(MapTransform): + def __init__( + self, + image_key: KeysCollection, + label_key: str, + overlay_key: str = "overlay", + alpha: float = 0.7, + allow_missing_keys: bool = False, + ): + + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(image_key, allow_missing_keys) + + self.image_key = image_key + self.label_key = label_key + self.overlay_key = overlay_key + self.alpha = alpha + self.jet_colormap = cm.get_cmap("jet", 256) # Get the Jet colormap with 256 discrete colors + + def apply_jet_colormap(self, label_volume): + """ + Apply the Jet colormap to a 3D label volume using matplotlib's colormap. + """ + assert label_volume.ndim == 3, "Label volume should have 3 dimensions (H, W, D) after removing channel." 
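+        # (Note: the normalization below assumes at least one nonzero label; an
+        # all-background volume would divide by zero here.)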
+ + label_volume_normalized = (label_volume / label_volume.max()) * 255.0 + label_volume_uint8 = label_volume_normalized.astype(np.uint8) + + # Apply the colormap to each label + label_rgb = self.jet_colormap(label_volume_uint8)[:, :, :, :3] # Only take the RGB channels + + label_rgb = (label_rgb * 255).astype(np.uint8) + # Rearrange axes to get (3, H, W, D) + label_rgb = np.transpose(label_rgb, (3, 0, 1, 2)) + + assert label_rgb.shape == ( + 3, + *label_volume.shape, + ), f"Label RGB shape should be (3,H, W, D) but got {label_rgb.shape}" + + return label_rgb + + def convert_to_rgb(self, image_volume): + """ + Convert a single-channel grayscale 3D image to an RGB 3D image. + """ + assert image_volume.ndim == 3, "Image volume should have 3 dimensions (H, W, D) after removing channel." + + image_volume_normalized = (image_volume - image_volume.min()) / (image_volume.max() - image_volume.min()) + image_rgb = np.stack([image_volume_normalized] * 3, axis=0) + image_rgb = (image_rgb * 255).astype(np.uint8) + + assert image_rgb.shape == ( + 3, + *image_volume.shape, + ), f"Image RGB shape should be (3,H, W, D) but got {image_rgb.shape}" + + return image_rgb + + def _create_overlay(self, image_volume, label_volume): + # Convert the image volume and label volume to RGB + image_rgb = self.convert_to_rgb(image_volume) + label_rgb = self.apply_jet_colormap(label_volume) + + # Create an alpha-blended overlay + overlay = image_rgb.copy() + mask = label_volume > 0 + + # Apply the overlay where the mask is present + for i in range(3): # For each color channel + overlay[i, mask] = (self.alpha * label_rgb[i, mask] + (1 - self.alpha) * overlay[i, mask]).astype(np.uint8) + + assert ( + overlay.shape == image_rgb.shape + ), f"Overlay shape should match image RGB shape: {overlay.shape} vs {image_rgb.shape}" + + return overlay + + def __call__(self, data): + d = dict(data) + + # Get the image and label tensors + image = d[self.image_key] # Expecting shape (1, H, W, D) + label = d[self.label_key] # Expecting shape (1, H, W, D) + + # # uncomment when running pipeline with mask (non-contour) outputs, i.e. LabelToContourd transform absent + # if image.device.type == "cuda": + # image = image.cpu() + # d[self.image_key] = image + # if label.device.type == "cuda": + # label = label.cpu() + # d[self.label_key] = label + # # ----------------------- + + # Ensure that the input has the correct dimensions + assert image.shape[0] == 1 and label.shape[0] == 1, "Image and label must have a channel dimension of 1." 
+ assert image.shape == label.shape, f"Image and label must have the same shape: {image.shape} vs {label.shape}" + + # Remove the channel dimension for processing + image_volume = image[0] # Shape: (H, W, D) + label_volume = label[0] # Shape: (H, W, D) + + # Convert to 3D overlay + overlay = self._create_overlay(image_volume, label_volume) + + # Add the channel dimension back + # d[self.overlay_key] = np.expand_dims(overlay, axis=0) # Shape: (1, H, W, D, 3) + d[self.overlay_key] = MetaTensor(overlay, meta=label.meta, affine=label.affine) # Shape: (3, H, W, D) + + # Assert the final output shape + # assert d[self.overlay_key].shape == (1, *image_volume.shape, 3), \ + # f"Final overlay shape should be (1, H, W, D, 3) but got {d[self.overlay_key].shape}" + + assert d[self.overlay_key].shape == ( + 3, + *image_volume.shape, + ), f"Final overlay shape should be (3, H, W, D) but got {d[self.overlay_key].shape}" + + # Log the overlay creation (debugging) + self._logger.info(f"Overlay created with shape: {overlay.shape}") + # self._logger.info(f"Dictionary keys: {d.keys()}") + + # self._logger.info('overlay_image shape: ', d[self.overlay_key].shape) + return d + + +class SaveData(MapTransform): + """ + Save the output dictionary into JSON files. + + The name of the saved file will be `{key}_{output_postfix}.json`. + + Args: + keys: keys of the corresponding items to be saved in the dictionary. + output_dir: directory to save the output files. + output_postfix: a string appended to all output file names, default is `data`. + separate_folder: whether to save each file in a separate folder. Default is `True`. + print_log: whether to print logs when saving. Default is `True`. + """ + + def __init__( + self, + keys: KeysCollection, + namekey: str = "image", + output_dir: str = "./", + output_postfix: str = "data", + separate_folder: bool = False, + print_log: bool = True, + allow_missing_keys: bool = False, + ): + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(keys, allow_missing_keys) + self.output_dir = output_dir + self.output_postfix = output_postfix + self.separate_folder = separate_folder + self.print_log = print_log + self.namekey = namekey + + def __call__(self, data): + d = dict(data) + image_name = os.path.basename(d[self.namekey].meta["filename_or_obj"]).split(".")[0] + for key in self.keys: + # Get the data + output_data = d[key] + + # Determine the file name + file_name = f"{image_name}_{self.output_postfix}.json" + if self.separate_folder: + file_path = os.path.join(self.output_dir, image_name, file_name) + os.makedirs(os.path.dirname(file_path), exist_ok=True) + else: + file_path = os.path.join(self.output_dir, file_name) + + # Save the dictionary as a JSON file + with open(file_path, "w") as f: + json.dump(output_data, f) + + if self.print_log: + self._logger.info(f"Saved data to {file_path}") + + return d + + +# custom transform (not in original post_transforms.py in bundle): +class ExtractVolumeToTextd(MapTransform): + """ + Custom transform to extract volume information from the segmentation results and format it as a textual summary. + Filters organ volumes based on output_labels for DICOM SR write, while including all organs for MongoDB write. + The upstream CalculateVolumeFromMaskd transform calculates organ volumes and stores them in the dictionary + under the pred_key + '_volumes' key. The input dictionary is outputted unchanged as to not affect downstream operators. 
+ + Args: + keys: keys of the corresponding items to be saved in the dictionary. + label_names: dictionary mapping organ names to their corresponding label indices. + output_labels: list of target label indices for organs to include in the DICOM SR output. + """ + + def __init__( + self, + keys: KeysCollection, + label_names: dict, + output_labels: List[int], + allow_missing_keys: bool = False, + ): + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(keys, allow_missing_keys) + + self.label_names = label_names + self.output_labels = output_labels + + self.result_text: Optional[str] = None + + def __call__(self, data): + d = dict(data) + # use the first key in `keys` to access the volume data (e.g., pred_key + '_volumes') + volumes_key = self.keys[0] + organ_volumes = d.get(volumes_key, None) + + if organ_volumes is None: + raise ValueError(f"Volume data not found for key {volumes_key}.") + + # create the volume text output + volume_text = [] + + # loop through calculated organ volumes + for organ, volume in organ_volumes.items(): + # if the organ's label index is in output_labels + label_index = self.label_names.get(organ, None) + if label_index in self.output_labels: + # append organ volume for DICOM SR entry + volume_text.append(f"{organ.capitalize()} Volume: {volume} mL") + + self.result_text = "\n".join(volume_text) + + # not adding result_text to dictionary; return dictionary unchanged as to not affect downstream operators + return d diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt new file mode 100644 index 00000000..fae3bb47 --- /dev/null +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt @@ -0,0 +1,37 @@ +# requirements.txt file specifies dependencies our Python project needs to run + +# install MONAI and necessary image processing packages (base list pulled from MONAI Bundle Spleen Seg App example) +# based on CCHMC Ped Abd MRI MONAI Bundle dependencies: +# monai, numpy, nibabel versions upgraded +# pytorch-ignite and fire dependencies added +# python 3.9 is required to install specified pytorch and monai-deploy-app-sdk versions +# einops optional dependency needed for DAE model workflow +monai[einops]==1.3.0 +torch>=1.12.0 +pytorch-ignite==0.4.11 +fire==0.4.0 +numpy>=1.24,<2.0 +nibabel==4.0.1 +# pydicom v3.0.0 removed pydicom._storage_sopclass_uids; don't meet or exceed this version +pydicom>=2.3.0,<3.0.0 +# pylibjpeg for processing compressed DICOM pixel data +pylibjpeg[all] +highdicom>=0.18.2 +itk>=5.3.0 +SimpleITK>=2.0.0 +scikit-image>=0.17.2 +Pillow>=8.0.0 +numpy-stl>=2.12.0 +trimesh>=3.8.11 +matplotlib>=3.7.2 +setuptools>=75.8.0 # for pkg_resources + +# MONAI Deploy App SDK package installation +# includes Holoscan SDK and CLI ~=3.0 +monai-deploy-app-sdk==3.0.0 + +# fine control over holoscan and holoscan-cli versions +holoscan==3.2.0 +holoscan-cli==3.2.0 +nvflare>=2.6.2,<3.0.0 +nnunetv2>=2.6.2,<3.0.0 \ No newline at end of file From 27da8dd487db3a923f66073fae281078695a0a7f Mon Sep 17 00:00:00 2001 From: Elan Somasundaram Date: Wed, 1 Oct 2025 19:41:54 -0400 Subject: [PATCH 15/21] Update README documentation Signed-off-by: Elan Somasundaram Signed-off-by: chezhia --- examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md index 
00263d95..c50ada26 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md @@ -35,8 +35,6 @@ The RESULTS_PATH should have "inference_information.json" file created by nnunet |----------|-------------|----------|---------| | `--dataset_name_or_id` | Name or ID of the nnUNet dataset to convert | Yes | N/A | | `--MAP_root` | Output directory for the converted MONAI bundle | No | Current directory | -| `--nnUNet_raw` | Path to nnUNet raw data directory | Yes | Uses environment variable if set | -| `--nnUNet_preprocessed` | Path to nnUNet preprocessed data directory | Yes | Uses environment variable if set | | `--nnUNet_results` | Path to nnUNet results directory with trained models | Yes | Uses environment variable if set | #### Example @@ -58,7 +56,7 @@ The conversion creates a MONAI bundle with the following structure in the specif MAP_root/ └── models/ ├── jsonpkls/ - │ ├── dataset.json # Dataset configuration + │ ├── dataset.json # Dataset configuration │ ├── plans.json # Model planning information │ ├── postprocessing.pkl # Optional postprocessing configuration ├── 3d_fullres/ # Model configuration (if present) @@ -194,7 +192,3 @@ Then, the MAP can be executed locally via the MAR command line utility; input an ``` monai-deploy run -i $HOLOSCAN_INPUT_PATH -o $HOLOSCAN_OUTPUT_PATH ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} ``` - -## Scripts -Several scripts have been compiled that quickly execute useful actions (such as local model execution, MAP building, etc.) Some scripts require -the input of command line arguments; review the `scripts` folder for more details. From b42b89302622de3830febe63ba201a2a8995bf15 Mon Sep 17 00:00:00 2001 From: chezhia Date: Wed, 29 Oct 2025 23:35:41 -0400 Subject: [PATCH 16/21] flake8 fixed Signed-off-by: chezhia --- examples/apps/LICENSE | 201 ++ examples/apps/README.md | 194 ++ .../cchmc_nnunet_fifteen_ckpt_app/LICENSE | 400 ++-- .../cchmc_nnunet_fifteen_ckpt_app/README.md | 388 ++-- .../convert_nnunet_ckpts.py | 207 +- .../development_notes.md | 124 +- .../my_app/__init__.py | 58 +- .../my_app/__main__.py | 52 +- .../my_app/app.py | 514 ++--- .../my_app/app.yaml | 66 +- .../my_app/dicom_sc_writer_operator.py | 506 ++--- .../my_app/dicom_series_selector_operator.py | 1258 +++++------ .../my_app/nnunet_bundle.py | 1990 ++++++++--------- .../my_app/nnunet_seg_operator.py | 854 +++---- .../my_app/post_transforms.py | 780 +++---- .../my_app/requirements.txt | 72 +- examples/apps/convert_nnunet_ckpts.py | 103 + examples/apps/development_notes.md | 62 + 18 files changed, 4195 insertions(+), 3634 deletions(-) create mode 100644 examples/apps/LICENSE create mode 100644 examples/apps/README.md create mode 100644 examples/apps/convert_nnunet_ckpts.py create mode 100644 examples/apps/development_notes.md diff --git a/examples/apps/LICENSE b/examples/apps/LICENSE new file mode 100644 index 00000000..f49a4e16 --- /dev/null +++ b/examples/apps/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/examples/apps/README.md b/examples/apps/README.md new file mode 100644 index 00000000..33eb51a5 --- /dev/null +++ b/examples/apps/README.md @@ -0,0 +1,194 @@ +# MONAI Application Package (MAP) for sample nnunet model + +This README describes the process of converting the [CCHMC Pediatric Airway Segmentation nnUnet model] into a MONAI Application Package (MAP). + +## Convert nnUNet checkpoints to MONAI compatible models + +The `convert_nnunet_ckpts.py` script simplifies the process of converting nnUNet model checkpoints to MONAI bundle format. This conversion is necessary to use nnUNet models within MONAI applications and the MONAI Deploy ecosystem. + +## Example model checkpoints + +Sample nnunet model checkpoints for a UTE MRI airway segmentation in NICU patients are available here + +https://drive.google.com/drive/folders/1lRs-IoLR47M_WFyZmuCaROJULtyPdkLm?usp=drive_link + +### Prerequisites + +Before running the conversion script, ensure that: +1. You have trained nnUNet models available +2. The nnUNet environment variables are set or you can provide them as arguments +3. Python environment with required dependencies is set up (my_app/requirements.txt) + +### Basic Usage + +The script can be executed with the following command: + +```bash +python convert_nnunet_ckpts.py --dataset_name_or_id DATASET_ID --MAP_root OUTPUT_DIR --nnUNet_results RESULTS_PATH +``` + +The RESULTS_PATH should have "inference_information.json" file created by nnunetv2 automatically, as the conversion relies on this to figure out the best model configuration and convert those for the MAP. + +### Command-line Arguments + +| Argument | Description | Required | Default | +|----------|-------------|----------|---------| +| `--dataset_name_or_id` | Name or ID of the nnUNet dataset to convert | Yes | N/A | +| `--MAP_root` | Output directory for the converted MONAI bundle | No | Current directory | +| `--nnUNet_results` | Path to nnUNet results directory with trained models | Yes | Uses environment variable if set | + +#### Example + +Convert dataset with ID 4 to models directory: + +```bash +python convert_nnunet_ckpts.py \ + --dataset_name_or_id 4 \ + --MAP_root "." \ + --nnUNet_results "/path/to/nnunet/models" +``` + +#### Output Structure + +The conversion creates a MONAI bundle with the following structure in the specified `MAP_root` directory: + +``` +MAP_root/ +└── models/ + ├── jsonpkls/ + │ ├── dataset.json # Dataset configuration + │ ├── plans.json # Model planning information + │ ├── postprocessing.pkl # Optional postprocessing configuration + ├── 3d_fullres/ # Model configuration (if present) + │ ├── nnunet_checkpoint.pth + │ └── fold_X/ # Each fold's model weights + │ └── best_model.pt + ├── 3d_lowres/ # Model configuration (if present) + └── 3d_cascade_fullres/ # Model configuration (if present) +``` + +This bundle structure is compatible with MONAI inference tools and the MONAI Deploy application ecosystem. + + +## Setting Up Environment +Instructions regarding installation of MONAI Deploy App SDK and details of the necessary system requirements can be found on the MONAI Deploy App SDK [GitHub Repository](https://github.com/Project-MONAI/monai-deploy-app-sdk) and [docs](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/installing_app_sdk.html). 
+### Basic Usage
+
+The script can be executed with the following command:
+
+```bash
+python convert_nnunet_ckpts.py --dataset_name_or_id DATASET_ID --MAP_root OUTPUT_DIR --nnUNet_results RESULTS_PATH
+```
+
+The RESULTS_PATH directory should contain the "inference_information.json" file created automatically by nnUNetv2, as the conversion relies on it to determine the best model configuration(s) and convert those for the MAP.
+
+### Command-line Arguments
+
+| Argument | Description | Required | Default |
+|----------|-------------|----------|---------|
+| `--dataset_name_or_id` | Name or ID of the nnUNet dataset to convert | Yes | N/A |
+| `--MAP_root` | Output directory for the converted MONAI bundle | No | Current directory |
+| `--nnUNet_results` | Path to nnUNet results directory with trained models | Yes, unless the `nnUNet_results` environment variable is set | Uses the `nnUNet_results` environment variable if set |
+
+#### Example
+
+Convert dataset with ID 4 to the models directory:
+
+```bash
+python convert_nnunet_ckpts.py \
+    --dataset_name_or_id 4 \
+    --MAP_root "." \
+    --nnUNet_results "/path/to/nnunet/models"
+```
+
+#### Output Structure
+
+The conversion creates a MONAI bundle with the following structure in the specified `MAP_root` directory:
+
+```
+MAP_root/
+└── models/
+    ├── jsonpkls/
+    │   ├── dataset.json             # Dataset configuration
+    │   ├── plans.json               # Model planning information
+    │   ├── postprocessing.pkl       # Optional postprocessing configuration
+    ├── 3d_fullres/                  # Model configuration (if present)
+    │   ├── nnunet_checkpoint.pth
+    │   └── fold_X/                  # Each fold's model weights
+    │       └── best_model.pt
+    ├── 3d_lowres/                   # Model configuration (if present)
+    └── 3d_cascade_fullres/          # Model configuration (if present)
+```
+
+This bundle structure is compatible with MONAI inference tools and the MONAI Deploy application ecosystem.
+
+
+## Setting Up Environment
+Instructions regarding installation of the MONAI Deploy App SDK and details of the necessary system requirements can be found on the MONAI Deploy App SDK [GitHub Repository](https://github.com/Project-MONAI/monai-deploy-app-sdk) and [docs](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/installing_app_sdk.html). Instructions on how to create a virtual environment and install other dependencies can be found in the MONAI Deploy App SDK docs under the Creating a Segmentation App Consuming a MONAI Bundle [example](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/tutorials/monai_bundle_app.html).
+
+Per MONAI, the MONAI Deploy App SDK is required to run in a Linux environment, specifically Ubuntu 22.04 on x86_64, as this is the only x86 platform that the underlying Holoscan SDK has been tested to support as of now. This project uses Poetry for dependency management, which simplifies setting up the environment with all required dependencies.
+
+### System Requirements
+- **Operating System:** Linux (Ubuntu 22.04 recommended)
+- **Architecture:** x86_64
+- **GPU:** NVIDIA GPU (recommended for inference)
+- **Python:** 3.10 or newer (project requires >=3.10,<3.13)
+
+
+
+## Executing Model Bundle Pythonically
+Prior to MAP building, the exported model bundle can be executed pythonically via the command line.
+
+Within the main directory of this downloaded repository, create a `.env` file. MONAI recommends the following `.env` structure and naming conventions:
+
+```env
+HOLOSCAN_INPUT_PATH=${PWD}/input
+HOLOSCAN_MODEL_PATH=${PWD}/models
+HOLOSCAN_OUTPUT_PATH=${PWD}/output
+```
+
+Load in the environment variables:
+
+```
+source .env
+```
+
+If it already exists, remove the directory specified by the `HOLOSCAN_OUTPUT_PATH` environment variable:
+
+```
+rm -rf $HOLOSCAN_OUTPUT_PATH
+```
+
+Execute the model bundle pythonically via the command line; the directory specified by the `HOLOSCAN_INPUT_PATH` environment variable should be created and populated with a DICOM series for testing by the user. The model bundle file should be placed within the `/model` folder to match the recommended `HOLOSCAN_MODEL_PATH` value. The `HOLOSCAN_INPUT_PATH`, `HOLOSCAN_OUTPUT_PATH`, and `HOLOSCAN_MODEL_PATH` default values can be amended by updating the `.env` file appropriately.
+
+```
+python my_app -i "$HOLOSCAN_INPUT_PATH" -o "$HOLOSCAN_OUTPUT_PATH" -m "$HOLOSCAN_MODEL_PATH"
+```
+
+## Building the MAP
+It is recommended that the NVIDIA Clara Holoscan base image be pulled prior to building the MAP. If this base image is not pulled beforehand, it will be pulled automatically during the build process, which will increase the build time from around 1-2 minutes to around 10-15 minutes. Ensure the base image matches the Holoscan SDK version being used in your environment (e.g., if you are using Holoscan SDK v3.2.0, replace `${holoscan-version}` with `v3.2.0`).
+
+```
+docker pull nvcr.io/nvidia/clara-holoscan/holoscan:${holoscan-version}-dgpu
+```
+
+Execute the following command to build the MAP Docker image based on the supported NVIDIA Clara Holoscan base image. During MAP building, a Docker container based on the `moby/buildkit` Docker image will be spun up; this container (Docker BuildKit builder `holoscan_app_builder`) facilitates the MAP build.
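+
+The build command below uses `${tag_prefix}` and `${image_version}` as user-chosen placeholders for the image name and tag; for illustration, they might be set as follows (values hypothetical):
+
+```bash
+export tag_prefix=my_app
+export image_version=1.0
+```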
+
+```
+monai-deploy package my_app -m $HOLOSCAN_MODEL_PATH -c my_app/app.yaml -t ${tag_prefix}:${image_version} --platform x86_64 -l DEBUG
+```
+
+As of August 2024, a new error may appear during the MAP build related to the Dockerfile, where an attempt is made to install `monai-deploy-app-sdk` v0 (which does not exist):
+
+```bash
+Dockerfile:78
+--------------------
+ 76 |
+ 77 | # Install MONAI Deploy from PyPI org
+ 78 | >>> RUN pip install monai-deploy-app-sdk==0
+ 79 |
+ 80 |
+--------------------
+```
+
+If you encounter this error, you can specify the MONAI Deploy App SDK version via `--sdk-version` directly in the build command (`3.0.0`, for example). The base image for the MAP build can also be specified via `--base-image`:
+
+```
+monai-deploy package my_app -m $HOLOSCAN_MODEL_PATH -c my_app/app.yaml -t ${tag_prefix}:${image_version} --platform x86_64 --base-image ${base_image} --sdk-version ${version} -l DEBUG
+```
+
+If using Docker Desktop, the MAP should now appear in the "Images" tab as `${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version}`. You can also confirm MAP creation in the CLI by executing this command:
+
+```
+docker image ls | grep ${tag_prefix}
+```
+
+## Display and Extract MAP Contents
+There are a few commands that can be executed in the command line to view MAP contents.
+
+To display some basic MAP manifests, use the `show` command. The following command will run and subsequently remove a MAP Docker container; the `show` command will display information about the MAP-associated `app.json` and `pkg.json` files as command line outputs.
+
+```
+docker run --rm ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} show
+```
+
+MAP manifests and other contents can also be extracted into a specific host folder using the `extract` command.
+
+The host folder used to store the extracted MAP contents must be created by the host, not by Docker upon running the MAP as a container. This is most applicable when MAP contents are extracted more than once; the export folder must be deleted and recreated in this case.
+
+```
+rm -rf `pwd`/export && mkdir -p `pwd`/export
+```
+
+After creating the folder for export, executing the following command will run and subsequently remove a MAP Docker container.
+
+```
+docker run --rm -v `pwd`/export/:/var/run/holoscan/export/ ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} extract
+```
+
+The `extract` command will extract MAP contents to the `/export` folder, organized as follows:
+- `app` folder, which contains all the files present in `my_app`
+- `config` folder, which contains the MAP manifests (`app.json`, `pkg.json`, and `app.yaml`)
+- `models` folder, which contains the model bundle used to create the MAP
+
+## Executing MAP Locally via the MONAI Application Runner (MAR)
+The generated MAP can be tested locally using the MONAI Application Runner (MAR).
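+
+The `monai-deploy` CLI used by MAR is provided by the `monai-deploy-app-sdk` package; if it is not already installed in the current environment, it can be installed from PyPI:
+
+```bash
+pip install monai-deploy-app-sdk
+```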
+ +First, clear the contents of the output directory: + +``` +rm -rf $HOLOSCAN_OUTPUT_PATH +``` + +Then, the MAP can be executed locally via the MAR command line utility; input and output directories must be specified: + +``` +monai-deploy run -i $HOLOSCAN_INPUT_PATH -o $HOLOSCAN_OUTPUT_PATH ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} +``` diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/LICENSE b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/LICENSE index 753842b6..f49a4e16 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/LICENSE +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/LICENSE @@ -1,201 +1,201 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and limitations under the License. \ No newline at end of file diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md index c50ada26..33eb51a5 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/README.md @@ -1,194 +1,194 @@ -# MONAI Application Package (MAP) for sample nnunet model - -This README describes the process of converting the [CCHMC Pediatric Airway Segmentation nnUnet model] into a MONAI Application Package (MAP). - -## Convert nnUNet checkpoints to MONAI compatible models - -The `convert_nnunet_ckpts.py` script simplifies the process of converting nnUNet model checkpoints to MONAI bundle format. This conversion is necessary to use nnUNet models within MONAI applications and the MONAI Deploy ecosystem. 
- -## Example model checkpoints - -Sample nnunet model checkpoints for a UTE MRI airway segmentation in NICU patients are available here - -https://drive.google.com/drive/folders/1lRs-IoLR47M_WFyZmuCaROJULtyPdkLm?usp=drive_link - -### Prerequisites - -Before running the conversion script, ensure that: -1. You have trained nnUNet models available -2. The nnUNet environment variables are set or you can provide them as arguments -3. Python environment with required dependencies is set up (my_app/requirements.txt) - -### Basic Usage - -The script can be executed with the following command: - -```bash -python convert_nnunet_ckpts.py --dataset_name_or_id DATASET_ID --MAP_root OUTPUT_DIR --nnUNet_results RESULTS_PATH -``` - -The RESULTS_PATH should have "inference_information.json" file created by nnunetv2 automatically, as the conversion relies on this to figure out the best model configuration and convert those for the MAP. - -### Command-line Arguments - -| Argument | Description | Required | Default | -|----------|-------------|----------|---------| -| `--dataset_name_or_id` | Name or ID of the nnUNet dataset to convert | Yes | N/A | -| `--MAP_root` | Output directory for the converted MONAI bundle | No | Current directory | -| `--nnUNet_results` | Path to nnUNet results directory with trained models | Yes | Uses environment variable if set | - -#### Example - -Convert dataset with ID 4 to models directory: - -```bash -python convert_nnunet_ckpts.py \ - --dataset_name_or_id 4 \ - --MAP_root "." \ - --nnUNet_results "/path/to/nnunet/models" -``` - -#### Output Structure - -The conversion creates a MONAI bundle with the following structure in the specified `MAP_root` directory: - -``` -MAP_root/ -└── models/ - ├── jsonpkls/ - │ ├── dataset.json # Dataset configuration - │ ├── plans.json # Model planning information - │ ├── postprocessing.pkl # Optional postprocessing configuration - ├── 3d_fullres/ # Model configuration (if present) - │ ├── nnunet_checkpoint.pth - │ └── fold_X/ # Each fold's model weights - │ └── best_model.pt - ├── 3d_lowres/ # Model configuration (if present) - └── 3d_cascade_fullres/ # Model configuration (if present) -``` - -This bundle structure is compatible with MONAI inference tools and the MONAI Deploy application ecosystem. - - -## Setting Up Environment -Instructions regarding installation of MONAI Deploy App SDK and details of the necessary system requirements can be found on the MONAI Deploy App SDK [GitHub Repository](https://github.com/Project-MONAI/monai-deploy-app-sdk) and [docs](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/installing_app_sdk.html). Instructions on how to create a virtual environment and install other dependencies can be found in the MONAI Deploy App SDK docs under the Creating a Segmentation App Consuming a MONAI Bundle [example](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/tutorials/monai_bundle_app.html). - -Per MONAI, MONAI Deploy App SDK is required to be run in a Linux environment, specifically Ubuntu 22.04 on X86-64, as this is the only X86 platform that the underlying Holoscan SDK has been tested to support as of now. This project uses Poetry for dependency management, which simplifies setting up the environment with all required dependencies. 
- -### System Requirements -- **Operating System:** Linux (Ubuntu 22.04 recommended) -- **Architecture:** x86_64 -- **GPU:** NVIDIA GPU (recommended for inference) -- **Python:** 3.10 or newer (project requires >=3.10,<3.13) - - - -## Executing Model Bundle Pythonically -Prior to MAP building, the exported model bundle can be executed pythonically via the command line. - -Within the main directory of this downloaded repository, create a `.env` file. MONAI recommends the following `.env` structure and naming conventions: - -```env -HOLOSCAN_INPUT_PATH=${PWD}/input -HOLOSCAN_MODEL_PATH=${PWD}/models -HOLOSCAN_OUTPUT_PATH=${PWD}/output -``` - -Load in the environment variables: - -``` -source .env -``` - -If already specified, remove the directory specified by the `HOLOSCAN_OUTPUT_PATH` environment variable: - -``` -rm -rf $HOLOSCAN_OUTPUT_PATH -``` - -Execute the model bundle pythonically via the command line; the directory specified by the `HOLOSCAN_INPUT_PATH` environment variable should be created and populated with a DICOM series for testing by the user. The model bundle file should be populated within the `/model` folder to match the recommended `HOLOSCAN_MODEL_PATH` value. `HOLOSCAN_INPUT_PATH`, `HOLOSCAN_OUTPUT_PATH`, and `HOLOSCAN_MODEL_PATH` default values can be amended by updating the `.env` file appropriately. - -``` -python my_app -i "$HOLOSCAN_INPUT_PATH" -o "$HOLOSCAN_OUTPUT_PATH" -m "$HOLOSCAN_MODEL_PATH" -``` - -## Building the MAP -It is recommended that the NVIDIA Clara Holoscan base image is pulled prior to building the MAP. If this base image is not pulled prior to MAP building, it will be done so automically during the build process, which will increase the build time from around 1/2 minutes to around 10/15 minutes. Ensure the base image matches the Holoscan SDK version being used in your environment (e.g. if you are using Holoscan SDK v3.2.0, replace `${holoscan-version}` with `v3.2.0`). - -``` -docker pull nvcr.io/nvidia/clara-holoscan/holoscan:${holoscan-version}-dgpu -``` - -Execute the following command to build the MAP Docker image based on the supported NVIDIA Clara Holoscan base image. During MAP building, a Docker container based on the `moby/buildkit` Docker image will be spun up; this container (Docker BuildKit builder `holoscan_app_builder`) facilitates the MAP build. - -``` -monai-deploy package my_app -m $HOLOSCAN_MODEL_PATH -c my_app/app.yaml -t ${tag_prefix}:${image_version} --platform x86_64 -l DEBUG -``` - -As of August 2024, a new error may appear during the MAP build related to the Dockerfile, where `monai-deploy-app-sdk` v0 (which does not exist) is attempted to be installed: - -```bash -Dockerfile:78 --------------------- - 76 | - 77 | # Install MONAI Deploy from PyPI org - 78 | >>> RUN pip install monai-deploy-app-sdk==0 - 79 | - 80 | --------------------- -``` - -If you encounter this error, you can specify the MONAI Deploy App SDK version via `--sdk-version` directly in the build command (`3.0.0`, for example). The base image for the MAP build can also be specified via `--base-image`: - -``` -monai-deploy package my_app -m $HOLOSCAN_MODEL_PATH -c my_app/app.yaml -t ${tag_prefix}:${image_version} --platform x86_64 --base-image ${base_image} --sdk-version ${version} -l DEBUG -``` - -If using Docker Desktop, the MAP should now appear in the "Images" tab as `${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version}`. 
You can also confirm MAP creation in the CLI by executing this command: - -``` -docker image ls | grep ${tag_prefix} -``` - -## Display and Extract MAP Contents -There are a few commands that can be executed in the command line to view MAP contents. - -To display some basic MAP manifests, use the `show` command. The following command will run and subsequently remove a MAP Docker container; the `show` command will display informaiton about the MAP-associated `app.json` and `pkg.json` files as command line outputs. - -``` -docker run --rm ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} show -``` - -MAP manifests and other contents can also be extracted into a specific host folder using the `extract` command. - -The host folder used to store the extracted MAP contents must be created by the host, not by Docker upon running the MAP as a container. This is most applicable when MAP contents are extracted more than once; the export folder must be deleted and recreated in this case. - -``` -rm -rf `pwd`/export && mkdir -p `pwd`/export -``` - -After creating the folder for export, executing the following command will run and subsequently remove a MAP Docker container. - -``` -docker run --rm -v `pwd`/export/:/var/run/holoscan/export/ ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} extract -``` - -The `extract` command will extract MAP contents to the `/export` folder, organized as follows: -- `app` folder, which contains of the all the files present in `my_app` -- `config` folder, which contains the MAP manifests (`app.json`, `pkg.json`, and `app.yaml`) -- `models` folder, which contains the model bundle used to created the MAP - -## Executing MAP Locally via the MONAI Application Runner (MAR) -The generated MAP can be tested locally using the MONAI Application Runner (MAR). - -First, clear the contents of the output directory: - -``` -rm -rf $HOLOSCAN_OUTPUT_PATH -``` - -Then, the MAP can be executed locally via the MAR command line utility; input and output directories must be specified: - -``` -monai-deploy run -i $HOLOSCAN_INPUT_PATH -o $HOLOSCAN_OUTPUT_PATH ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} -``` +# MONAI Application Package (MAP) for sample nnunet model + +This README describes the process of converting the [CCHMC Pediatric Airway Segmentation nnUnet model] into a MONAI Application Package (MAP). + +## Convert nnUNet checkpoints to MONAI compatible models + +The `convert_nnunet_ckpts.py` script simplifies the process of converting nnUNet model checkpoints to MONAI bundle format. This conversion is necessary to use nnUNet models within MONAI applications and the MONAI Deploy ecosystem. + +## Example model checkpoints + +Sample nnunet model checkpoints for a UTE MRI airway segmentation in NICU patients are available here + +https://drive.google.com/drive/folders/1lRs-IoLR47M_WFyZmuCaROJULtyPdkLm?usp=drive_link + +### Prerequisites + +Before running the conversion script, ensure that: +1. You have trained nnUNet models available +2. The nnUNet environment variables are set or you can provide them as arguments +3. 
A Python environment with the required dependencies is set up (my_app/requirements.txt)
+
+### Basic Usage
+
+The script can be executed with the following command:
+
+```bash
+python convert_nnunet_ckpts.py --dataset_name_or_id DATASET_ID --MAP_root OUTPUT_DIR --nnUNet_results RESULTS_PATH
+```
+
+The RESULTS_PATH directory should contain the "inference_information.json" file created automatically by nnUNetv2, as the conversion relies on it to determine the best model configuration(s) and convert those for the MAP.
+
+### Command-line Arguments
+
+| Argument | Description | Required | Default |
+|----------|-------------|----------|---------|
+| `--dataset_name_or_id` | Name or ID of the nnUNet dataset to convert | Yes | N/A |
+| `--MAP_root` | Output directory for the converted MONAI bundle | No | Current directory |
+| `--nnUNet_results` | Path to nnUNet results directory with trained models | Yes, unless the `nnUNet_results` environment variable is set | Uses the `nnUNet_results` environment variable if set |
+
+#### Example
+
+Convert dataset with ID 4 to the models directory:
+
+```bash
+python convert_nnunet_ckpts.py \
+    --dataset_name_or_id 4 \
+    --MAP_root "." \
+    --nnUNet_results "/path/to/nnunet/models"
+```
+
+#### Output Structure
+
+The conversion creates a MONAI bundle with the following structure in the specified `MAP_root` directory:
+
+```
+MAP_root/
+└── models/
+    ├── jsonpkls/
+    │   ├── dataset.json             # Dataset configuration
+    │   ├── plans.json               # Model planning information
+    │   ├── postprocessing.pkl       # Optional postprocessing configuration
+    ├── 3d_fullres/                  # Model configuration (if present)
+    │   ├── nnunet_checkpoint.pth
+    │   └── fold_X/                  # Each fold's model weights
+    │       └── best_model.pt
+    ├── 3d_lowres/                   # Model configuration (if present)
+    └── 3d_cascade_fullres/          # Model configuration (if present)
+```
+
+This bundle structure is compatible with MONAI inference tools and the MONAI Deploy application ecosystem.
+
+
+## Setting Up Environment
+Instructions regarding installation of the MONAI Deploy App SDK and details of the necessary system requirements can be found on the MONAI Deploy App SDK [GitHub Repository](https://github.com/Project-MONAI/monai-deploy-app-sdk) and [docs](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/installing_app_sdk.html). Instructions on how to create a virtual environment and install other dependencies can be found in the MONAI Deploy App SDK docs under the Creating a Segmentation App Consuming a MONAI Bundle [example](https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/getting_started/tutorials/monai_bundle_app.html).
+
+Per MONAI, the MONAI Deploy App SDK is required to run in a Linux environment, specifically Ubuntu 22.04 on x86_64, as this is the only x86 platform that the underlying Holoscan SDK has been tested to support as of now. This project uses Poetry for dependency management, which simplifies setting up the environment with all required dependencies.
+
+### System Requirements
+- **Operating System:** Linux (Ubuntu 22.04 recommended)
+- **Architecture:** x86_64
+- **GPU:** NVIDIA GPU (recommended for inference)
+- **Python:** 3.10 or newer (project requires >=3.10,<3.13)
+
+
+
+## Executing Model Bundle Pythonically
+Prior to MAP building, the exported model bundle can be executed pythonically via the command line.
+
+Within the main directory of this downloaded repository, create a `.env` file.
MONAI recommends the following `.env` structure and naming conventions:
+
+```env
+HOLOSCAN_INPUT_PATH=${PWD}/input
+HOLOSCAN_MODEL_PATH=${PWD}/models
+HOLOSCAN_OUTPUT_PATH=${PWD}/output
+```
+
+Load in the environment variables:
+
+```
+source .env
+```
+
+If it already exists, remove the directory specified by the `HOLOSCAN_OUTPUT_PATH` environment variable:
+
+```
+rm -rf $HOLOSCAN_OUTPUT_PATH
+```
+
+Execute the model bundle pythonically via the command line; the directory specified by the `HOLOSCAN_INPUT_PATH` environment variable should be created and populated with a DICOM series for testing by the user. The model bundle file should be placed within the `/model` folder to match the recommended `HOLOSCAN_MODEL_PATH` value. The `HOLOSCAN_INPUT_PATH`, `HOLOSCAN_OUTPUT_PATH`, and `HOLOSCAN_MODEL_PATH` default values can be amended by updating the `.env` file appropriately.
+
+```
+python my_app -i "$HOLOSCAN_INPUT_PATH" -o "$HOLOSCAN_OUTPUT_PATH" -m "$HOLOSCAN_MODEL_PATH"
+```
+
+## Building the MAP
+It is recommended that the NVIDIA Clara Holoscan base image be pulled prior to building the MAP. If this base image is not pulled beforehand, it will be pulled automatically during the build process, which will increase the build time from around 1-2 minutes to around 10-15 minutes. Ensure the base image matches the Holoscan SDK version being used in your environment (e.g., if you are using Holoscan SDK v3.2.0, replace `${holoscan-version}` with `v3.2.0`).
+
+```
+docker pull nvcr.io/nvidia/clara-holoscan/holoscan:${holoscan-version}-dgpu
+```
+
+Execute the following command to build the MAP Docker image based on the supported NVIDIA Clara Holoscan base image. During MAP building, a Docker container based on the `moby/buildkit` Docker image will be spun up; this container (Docker BuildKit builder `holoscan_app_builder`) facilitates the MAP build.
+
+```
+monai-deploy package my_app -m $HOLOSCAN_MODEL_PATH -c my_app/app.yaml -t ${tag_prefix}:${image_version} --platform x86_64 -l DEBUG
+```
+
+As of August 2024, a new error may appear during the MAP build related to the Dockerfile, where an attempt is made to install `monai-deploy-app-sdk` v0 (which does not exist):
+
+```bash
+Dockerfile:78
+--------------------
+ 76 |
+ 77 | # Install MONAI Deploy from PyPI org
+ 78 | >>> RUN pip install monai-deploy-app-sdk==0
+ 79 |
+ 80 |
+--------------------
+```
+
+If you encounter this error, you can specify the MONAI Deploy App SDK version via `--sdk-version` directly in the build command (`3.0.0`, for example). The base image for the MAP build can also be specified via `--base-image`:
+
+```
+monai-deploy package my_app -m $HOLOSCAN_MODEL_PATH -c my_app/app.yaml -t ${tag_prefix}:${image_version} --platform x86_64 --base-image ${base_image} --sdk-version ${version} -l DEBUG
+```
+
+If using Docker Desktop, the MAP should now appear in the "Images" tab as `${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version}`. You can also confirm MAP creation in the CLI by executing this command:
+
+```
+docker image ls | grep ${tag_prefix}
+```
+
+## Display and Extract MAP Contents
+There are a few commands that can be executed in the command line to view MAP contents.
+
+To display some basic MAP manifests, use the `show` command. The following command will run and subsequently remove a MAP Docker container; the `show` command will display information about the MAP-associated `app.json` and `pkg.json` files as command line outputs.
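+
+The commands in this section reuse the `${tag_prefix}` and `${image_version}` placeholders chosen at build time; for illustration, they might be set as follows (values hypothetical):
+
+```bash
+export tag_prefix=my_app
+export image_version=1.0
+```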
+
+```
+docker run --rm ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} show
+```
+
+MAP manifests and other contents can also be extracted into a specific host folder using the `extract` command.
+
+The host folder used to store the extracted MAP contents must be created by the host, not by Docker upon running the MAP as a container. This is most applicable when MAP contents are extracted more than once; the export folder must be deleted and recreated in this case.
+
+```
+rm -rf `pwd`/export && mkdir -p `pwd`/export
+```
+
+After creating the folder for export, executing the following command will run and subsequently remove a MAP Docker container.
+
+```
+docker run --rm -v `pwd`/export/:/var/run/holoscan/export/ ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version} extract
+```
+
+The `extract` command will extract MAP contents to the `/export` folder, organized as follows:
+- `app` folder, which contains all the files present in `my_app`
+- `config` folder, which contains the MAP manifests (`app.json`, `pkg.json`, and `app.yaml`)
+- `models` folder, which contains the model bundle used to create the MAP
+
+## Executing MAP Locally via the MONAI Application Runner (MAR)
+The generated MAP can be tested locally using the MONAI Application Runner (MAR).
+
+First, clear the contents of the output directory:
+
+```
+rm -rf $HOLOSCAN_OUTPUT_PATH
+```
+
+Then, the MAP can be executed locally via the MAR command line utility; input and output directories must be specified:
+
+```
+monai-deploy run -i $HOLOSCAN_INPUT_PATH -o $HOLOSCAN_OUTPUT_PATH ${tag_prefix}-x64-workstation-dgpu-linux-amd64:${image_version}
+```
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/convert_nnunet_ckpts.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/convert_nnunet_ckpts.py
index 4f0d57f4..e1691e89 100644
--- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/convert_nnunet_ckpts.py
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/convert_nnunet_ckpts.py
@@ -1,104 +1,103 @@
-#!/usr/bin/env python3
-# Copyright (c) MONAI Consortium
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Convert nnUNet checkpoints to MONAI bundle format.
-This script follows the logic in the conversion notebook but imports from local apps.nnunet_bundle.
-""" - -import argparse -import os -import sys - -# Add the current directory to the path to find the local module -current_dir = os.path.dirname(os.path.abspath(__file__)) -if current_dir not in sys.path: - sys.path.insert(0, current_dir) - -# Try importing from local apps.nnunet_bundle instead of from MONAI -try: - from my_app.nnunet_bundle import convert_best_nnunet_to_monai_bundle -except ImportError: - # If local import fails, try to find the module in alternate locations - try: - from monai.apps.nnunet_bundle import convert_best_nnunet_to_monai_bundle - except ImportError: - print( - "Error: Could not import convert_best_nnunet_to_monai_bundle from my_app.nnunet_bundle or apps.nnunet_bundle" - ) - print("Please ensure that nnunet_bundle.py is properly installed in your project.") - sys.exit(1) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Convert nnUNet checkpoints to MONAI bundle format.") - parser.add_argument( - "--dataset_name_or_id", type=str, required=True, help="The name or ID of the dataset to convert." - ) - parser.add_argument( - "--MAP_root", - type=str, - default=os.getcwd(), - help="The root directory where the Medical Application Package (MAP) will be created. Defaults to current directory.", - ) - - parser.add_argument( - "--nnUNet_results", - type=str, - required=False, - default=None, - help="Path to nnUNet results directory with trained models.", - ) - return parser.parse_args() - - -def main(): - args = parse_args() - - # Create the nnUNet config dictionary - nnunet_config = { - "dataset_name_or_id": args.dataset_name_or_id, - } - - # Create the MAP root directory - map_root = args.MAP_root - os.makedirs(map_root, exist_ok=True) - - # Set nnUNet environment variables if provided - if args.nnUNet_results: - os.environ["nnUNet_results"] = args.nnUNet_results - print(f"Set nnUNet_results to: {args.nnUNet_results}") - - # Check if required environment variables are set - required_env_vars = ["nnUNet_results"] - missing_vars = [var for var in required_env_vars if var not in os.environ] - - if missing_vars: - print(f"Error: The following required nnUNet environment variables are not set: {', '.join(missing_vars)}") - print("Please provide them as arguments or set them in your environment before running this script.") - sys.exit(1) - - print(f"Converting nnUNet checkpoints for dataset {nnunet_config['dataset_name_or_id']} to MONAI bundle format...") - print(f"MAP will be created at: {map_root}") - print(f" nnUNet_results: {os.environ.get('nnUNet_results')}") - - # Convert the nnUNet checkpoints to MONAI bundle format - try: - convert_best_nnunet_to_monai_bundle(nnunet_config, map_root) - print(f"Successfully converted nnUNet checkpoints to MONAI bundle at: {map_root}/models") - except Exception as e: - print(f"Error converting nnUNet checkpoints: {e}") - sys.exit(1) - - -if __name__ == "__main__": - main() +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Convert nnUNet checkpoints to MONAI bundle format. 
+This script follows the logic in the conversion notebook but imports from the local my_app.nnunet_bundle.
+"""
+
+import argparse
+import os
+import sys
+
+# Add the current directory to the path to find the local module
+current_dir = os.path.dirname(os.path.abspath(__file__))
+if current_dir not in sys.path:
+    sys.path.insert(0, current_dir)
+
+# Try importing from the local my_app.nnunet_bundle first, falling back to MONAI
+try:
+    from my_app.nnunet_bundle import convert_best_nnunet_to_monai_bundle
+except ImportError:
+    # If local import fails, try to find the module in alternate locations
+    try:
+        from monai.apps.nnunet_bundle import convert_best_nnunet_to_monai_bundle
+    except ImportError:
+        print(
+            "Error: Could not import convert_best_nnunet_to_monai_bundle from my_app.nnunet_bundle or monai.apps.nnunet_bundle"
+        )
+        print("Please ensure that nnunet_bundle.py is properly installed in your project.")
+        sys.exit(1)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Convert nnUNet checkpoints to MONAI bundle format.")
+    parser.add_argument(
+        "--dataset_name_or_id", type=str, required=True, help="The name or ID of the dataset to convert."
+    )
+    parser.add_argument(
+        "--MAP_root",
+        type=str,
+        default=os.getcwd(),
+        help="The root directory where the MONAI Application Package (MAP) will be created. Defaults to current directory.",
+    )
+
+    parser.add_argument(
+        "--nnUNet_results",
+        type=str,
+        required=False,
+        default=None,
+        help="Path to nnUNet results directory with trained models.",
+    )
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+
+    # Create the nnUNet config dictionary
+    nnunet_config = {
+        "dataset_name_or_id": args.dataset_name_or_id,
+    }
+
+    # Create the MAP root directory
+    map_root = args.MAP_root
+    os.makedirs(map_root, exist_ok=True)
+
+    # Set nnUNet environment variables if provided
+    if args.nnUNet_results:
+        os.environ["nnUNet_results"] = args.nnUNet_results
+        print(f"Set nnUNet_results to: {args.nnUNet_results}")
+
+    # Check if required environment variables are set
+    required_env_vars = ["nnUNet_results"]
+    missing_vars = [var for var in required_env_vars if var not in os.environ]
+
+    if missing_vars:
+        print(f"Error: The following required nnUNet environment variables are not set: {', '.join(missing_vars)}")
+        print("Please provide them as arguments or set them in your environment before running this script.")
+        sys.exit(1)
+
+    print(f"Converting nnUNet checkpoints for dataset {nnunet_config['dataset_name_or_id']} to MONAI bundle format...")
+    print(f"MAP will be created at: {map_root}")
+    print(f"  nnUNet_results: {os.environ.get('nnUNet_results')}")
+
+    # Convert the nnUNet checkpoints to MONAI bundle format
+    try:
+        convert_best_nnunet_to_monai_bundle(nnunet_config, map_root)
+        print(f"Successfully converted nnUNet checkpoints to MONAI bundle at: {map_root}/models")
+    except Exception as e:
+        print(f"Error converting nnUNet checkpoints: {e}")
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/development_notes.md b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/development_notes.md
index 5e647e38..f1e91b99 100644
--- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/development_notes.md
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/development_notes.md
@@ -1,62 +1,62 @@
-# Development Notes
-
-## Implementation Notes for nnUNet MAP
-
-
-* Initial Tests show volume and Dice agreement with Bundle, need to do more thorough testing.
-
-1.
For each model configuration the output gets written to .npz file by nnunet inference functions.
-
-2. These file paths are then used by the EnsembleProbabilities Transform function to create the final output.
-
-3. If nnunet postprocessing is used, use the largest connected component transform in the MAP. There could be minor differences in the implementation, will do thorough analysis later.
-
-3. Need to better understand the use of "context" in compute and compute_impl as input arguments.
-
-4. Investigate keeping the probabilities in the memory, to help with speedup.
-
-5. Need to investigate the current traceability provisions in the operators implemented.
-
-
-## Implementation Details
-
-### Testing Strategy
-
-Tests should be conducted to:
-1. Compare MAP output with native nnUNet output
-2. Measure performance (time, memory usage)
-3. Validate with various input formats and sizes
-4. Test error handling and edge cases
-
-
-### nnUNet Integration
-
-The current implementation relies on the nnUNet's native inference approach which outputs intermediate .npz files for each model configuration. While this works, it introduces file I/O overhead which could potentially be optimized.
-
-### Ensemble Prediction Flow
-
-1. Multiple nnUNet models (3d_fullres, 3d_lowres, 3d_cascade_fullres) are loaded
-2. Each model performs inference separately
-3. Results are written to temporary .npz files
-4. EnsembleProbabilitiesToSegmentation transform reads these files
-5. Final segmentation is produced by combining results
-
-### Potential Optimizations
-
-- Keep probability maps in memory instead of writing to disk
-- Parallelize model inference where applicable
-- Streamline the ensemble computation process
-
-### Context Usage
-
-The `context` parameter in `compute` and `compute_impl` functions appears to be used for storing and retrieving models. Further investigation is needed to fully understand how this context is managed and whether it's being used optimally.
-
-### Traceability
-
-Current traceability in the operators may need improvement. Consider adding:
-
-- More detailed logging
-- Performance metrics
-- Input/output validation steps
-- Error handling with informative messages
-
+# Development Notes
+
+## Implementation Notes for nnUNet MAP
+
+
+* Initial tests show volume and Dice agreement with the Bundle; more thorough testing is needed.
+
+1. For each model configuration, the output gets written to a .npz file by the nnUNet inference functions.
+
+2. These file paths are then used by the EnsembleProbabilities Transform function to create the final output.
+
+3. If nnUNet postprocessing is used, use the largest connected component transform in the MAP. There could be minor differences in the implementation; a thorough analysis will be done later.
+
+4. Need to better understand the use of "context" in compute and compute_impl as input arguments.
+
+5. Investigate keeping the probabilities in memory to help with speedup.
+
+6. Need to investigate the current traceability provisions in the operators implemented.
+
+
+## Implementation Details
+
+### Testing Strategy
+
+Tests should be conducted to:
+1. Compare MAP output with native nnUNet output
+2. Measure performance (time, memory usage)
+3. Validate with various input formats and sizes
+4. Test error handling and edge cases
+
+
+### nnUNet Integration
+
+The current implementation relies on nnUNet's native inference approach, which outputs intermediate .npz files for each model configuration. While this works, it introduces file I/O overhead which could potentially be optimized.
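+
+As a minimal sketch of the ensembling idea described below (not the actual operator code; file and array key names are hypothetical), the per-configuration probabilities could be combined in memory:
+
+```python
+import numpy as np
+
+# Hypothetical per-configuration softmax probability files written during inference
+npz_paths = ["3d_fullres.npz", "3d_lowres.npz", "3d_cascade_fullres.npz"]
+
+# Average class probabilities across configurations, then argmax over the class axis
+probs = [np.load(p)["probabilities"] for p in npz_paths]  # each array: (C, X, Y, Z)
+segmentation = np.mean(probs, axis=0).argmax(axis=0)  # (X, Y, Z) label map
+```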
While this works, it introduces file I/O overhead which could potentially be optimized. + +### Ensemble Prediction Flow + +1. Multiple nnUNet models (3d_fullres, 3d_lowres, 3d_cascade_fullres) are loaded +2. Each model performs inference separately +3. Results are written to temporary .npz files +4. EnsembleProbabilitiesToSegmentation transform reads these files +5. Final segmentation is produced by combining results + +### Potential Optimizations + +- Keep probability maps in memory instead of writing to disk +- Parallelize model inference where applicable +- Streamline the ensemble computation process + +### Context Usage + +The `context` parameter in `compute` and `compute_impl` functions appears to be used for storing and retrieving models. Further investigation is needed to fully understand how this context is managed and whether it's being used optimally. + +### Traceability + +Current traceability in the operators may need improvement. Consider adding: + +- More detailed logging +- Performance metrics +- Input/output validation steps +- Error handling with informative messages + diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__init__.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__init__.py index 52274b46..06014cc7 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__init__.py +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__init__.py @@ -1,29 +1,29 @@ -# Copyright 2021-2025 MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# __init__.py is used to initialize a Python package -# ensures that the directory __init__.py resides in is included at the start of the sys.path -# this is useful when you want to import modules from this directory, even if it’s not the -# directory where your Python script is running. - -# give access to operating system and Python interpreter -import os -import sys - -# grab absolute path of directory containing __init__.py -_current_dir = os.path.abspath(os.path.dirname(__file__)) - -# if sys.path is not the same as the directory containing the __init__.py file -if sys.path and os.path.abspath(sys.path[0]) != _current_dir: - # insert directory containing __init__.py file at the beginning of sys.path - sys.path.insert(0, _current_dir) -# delete variable -del _current_dir +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
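+
+# Usage sketch (hypothetical session): because this file prepends the package
+# directory to sys.path (see below), sibling modules in this folder can be
+# imported without package-qualified names, e.g. __main__.py can simply do
+# `from app import UTEAirwayNNUnetApp`.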
+ +# __init__.py is used to initialize a Python package +# ensures that the directory __init__.py resides in is included at the start of the sys.path +# this is useful when you want to import modules from this directory, even if it’s not the +# directory where your Python script is running. + +# give access to operating system and Python interpreter +import os +import sys + +# grab absolute path of directory containing __init__.py +_current_dir = os.path.abspath(os.path.dirname(__file__)) + +# if sys.path is not the same as the directory containing the __init__.py file +if sys.path and os.path.abspath(sys.path[0]) != _current_dir: + # insert directory containing __init__.py file at the beginning of sys.path + sys.path.insert(0, _current_dir) +# delete variable +del _current_dir diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__main__.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__main__.py index 0a6920ed..afd32bef 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__main__.py +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/__main__.py @@ -1,26 +1,26 @@ -# Copyright 2021-2025 MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# __main__.py is needed for MONAI Application Packager to detect the main app code (app.py) when -# app.py is executed in the application folder path -# e.g., python my_app - -import logging - -# import UTEAirwayNNUnetApp class from app.py -from app import UTEAirwayNNUnetApp - -# if __main__.py is being run directly -if __name__ == "__main__": - logging.info(f"Begin {__name__}") - # create and run an instance of UTEAirwayNNUnetApp - UTEAirwayNNUnetApp().run() - logging.info(f"End {__name__}") +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
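+
+# Usage sketch: from the parent folder, `python my_app` executes this file,
+# which imports app.py (see the notes below) and runs UTEAirwayNNUnetApp.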
+ +# __main__.py is needed for MONAI Application Packager to detect the main app code (app.py) when +# app.py is executed in the application folder path +# e.g., python my_app + +import logging + +# import UTEAirwayNNUnetApp class from app.py +from app import UTEAirwayNNUnetApp + +# if __main__.py is being run directly +if __name__ == "__main__": + logging.info(f"Begin {__name__}") + # create and run an instance of UTEAirwayNNUnetApp + UTEAirwayNNUnetApp().run() + logging.info(f"End {__name__}") diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.py index a944fada..0161c647 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.py +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.py @@ -1,257 +1,257 @@ -# Copyright 2021-2025 MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from pathlib import Path - -# custom DICOMSCWriterOperator (Secondary Capture) -from dicom_sc_writer_operator import DICOMSCWriterOperator - -# custom DICOMSeriesSelectorOperator -from dicom_series_selector_operator import DICOMSeriesSelectorOperator - -# custom inference operator -from nnunet_seg_operator import NNUnetSegOperator - -# required for setting SegmentDescription attributes -# direct import as this is not part of App SDK package -from pydicom.sr.codedict import codes - -from monai.deploy.conditions import CountCondition -from monai.deploy.core import Application -from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator -from monai.deploy.operators.dicom_seg_writer_operator import DICOMSegmentationWriterOperator, SegmentDescription -from monai.deploy.operators.dicom_series_to_volume_operator import DICOMSeriesToVolumeOperator -from monai.deploy.operators.dicom_text_sr_writer_operator import DICOMTextSRWriterOperator, EquipmentInfo, ModelInfo - - -# inherit new Application class instance, AIAbdomenSegApp, from MONAI Application base class -# base class provides support for chaining up operators and executing application -class UTEAirwayNNUnetApp(Application): - """Demonstrates inference with nnU-Net ensemble models for airway segmentation. - - This application loads a set of DICOM instances, selects the appropriate series, converts the series to - 3D volume image, performs inference with the NNUnetSegOperator, including pre-processing - and post-processing, saves a DICOM SEG (airway contour), a DICOM Secondary Capture (airway contour overlay), - and a DICOM SR (airway volume). - - Pertinent MONAI Bundle: - This MAP is designed to work with a MONAI bundle compatible with nnU-Net. 
- """ - - def __init__(self, *args, **kwargs): - """Creates an application instance.""" - self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__)) - super().__init__(*args, **kwargs) - - def run(self, *args, **kwargs): - # this method calls the base class to run; can be omitted if simply calling through - self._logger.info(f"Begin {self.run.__name__}") - super().run(*args, **kwargs) - self._logger.info(f"End {self.run.__name__}") - - # use compose method to instantiate operators and connect them to form a Directed Acyclic Graph (DAG) - def compose(self): - """Creates the app specific operators and chain them up in the processing DAG.""" - - logging.info(f"Begin {self.compose.__name__}") - - # use Commandline options over environment variables to init context - app_context = Application.init_app_context(self.argv) - app_input_path = Path(app_context.input_path) - app_output_path = Path(app_context.output_path) - model_path = Path(app_context.model_path) - - # Temporary bug fix for MAP execution where model path copy is messed up - need fix to app-sdk package function - # Check if the model_path has a subfolder named 'models' and set model_path to that subfolder if it exists - models_subfolder = model_path / "models" - if models_subfolder.exists() and models_subfolder.is_dir(): - self._logger.info(f"Found 'models' subfolder in {model_path}. Setting model_path to {models_subfolder}") - model_path = models_subfolder - - # create the custom operator(s) as well as SDK built-in operator(s) - # DICOM Data Loader op - study_loader_op = DICOMDataLoaderOperator( - self, CountCondition(self, 1), input_folder=app_input_path, name="study_loader_op" - ) - - # custom DICOM Series Selector op - # all_matched and sort_by_sop_instance_count = True; want all series that meet the selection criteria - # to be matched, and SOP sorting - series_selector_op = DICOMSeriesSelectorOperator( - self, rules=Sample_Rules_Text, all_matched=True, sort_by_sop_instance_count=True, name="series_selector_op" - ) - - # DICOM Series to Volume op - series_to_vol_op = DICOMSeriesToVolumeOperator(self, name="series_to_vol_op") - - # custom inference op - # output_labels specifies which of the organ segmentations are desired in the DICOM SEG, DICOM SC, and DICOM SR outputs - # 1 = airway - output_labels = [1] - nnunet_seg_op = NNUnetSegOperator( - self, - app_context=app_context, - model_path=model_path, - output_folder=app_output_path, - output_labels=output_labels, - name="nnunet_seg_op", - ) - - # create DICOM Seg writer providing the required segment description for each segment with - # the actual algorithm and the pertinent organ/tissue; the segment_label, algorithm_name, - # and algorithm_version are of DICOM VR LO type, limited to 64 chars - # https://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html - - # general algorithm information - _algorithm_name = "UTE_nnunet_airway" - _algorithm_family = codes.DCM.ArtificialIntelligence - _algorithm_version = "1.0.0" - - segment_descriptions = [ - SegmentDescription( - segment_label="Airway", - segmented_property_category=codes.SCT.BodyStructure, - segmented_property_type=codes.SCT.TracheaAndBronchus, - algorithm_name=_algorithm_name, - algorithm_family=_algorithm_family, - algorithm_version=_algorithm_version, - ), - ] - - # model info is algorithm information - my_model_info = ModelInfo( - creator="UTE", # institution name - name=_algorithm_name, # algorithm name - version=_algorithm_version, # algorithm version - uid="1.0.0", # MAP 
version - ) - - # equipment info is MONAI Deploy App SDK information - my_equipment = EquipmentInfo( - manufacturer="The MONAI Consortium", - manufacturer_model="MONAI Deploy App SDK", - software_version_number="3.0.0", # MONAI Deploy App SDK version - ) - - # custom tags - add AlgorithmName for monitoring purposes - custom_tags_seg = { - "SeriesDescription": "AI Generated DICOM SEG; Not for Clinical Use.", - "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}", - } - custom_tags_sr = { - "SeriesDescription": "AI Generated DICOM SR; Not for Clinical Use.", - "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}", - } - custom_tags_sc = { - "SeriesDescription": "AI Generated DICOM Secondary Capture; Not for Clinical Use.", - "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}", - } - - # DICOM SEG Writer op writes content from segment_descriptions to output DICOM images as DICOM tags - dicom_seg_writer = DICOMSegmentationWriterOperator( - self, - segment_descriptions=segment_descriptions, - model_info=my_model_info, - custom_tags=custom_tags_seg, - # store DICOM SEG in SEG subdirectory; necessary for routing in CCHMC MDE workflow definition - output_folder=app_output_path / "SEG", - # omit_empty_frames is a default parameteter (type bool) of DICOMSegmentationWriterOperator - # dictates whether or not to omit frames that contain no segmented pixels from the output segmentation - # default value is True; changed to False to ensure input and output DICOM series #'s match - omit_empty_frames=False, - name="dicom_seg_writer", - ) - - # DICOM SR Writer op - dicom_sr_writer = DICOMTextSRWriterOperator( - self, - # copy_tags is a default parameteter (type bool) of DICOMTextSRWriterOperator; default value is True - # dictates whether or not to copy DICOM attributes from the selected DICOM series - # changed to True to copy DICOM attributes so DICOM SR has same Study UID - copy_tags=True, - model_info=my_model_info, - equipment_info=my_equipment, - custom_tags=custom_tags_sr, - # store DICOM SR in SR subdirectory; necessary for routing in CCHMC MDE workflow definition - output_folder=app_output_path / "SR", - ) - - # custom DICOM SC Writer op - dicom_sc_writer = DICOMSCWriterOperator( - self, - model_info=my_model_info, - equipment_info=my_equipment, - custom_tags=custom_tags_sc, - # store DICOM SC in SC subdirectory; necessary for routing in CCHMC MDE workflow definition - output_folder=app_output_path / "SC", - ) - - # create the processing pipeline, by specifying the source and destination operators, and - # ensuring the output from the former matches the input of the latter, in both name and type - # instantiate and connect operators using self.add_flow(); specify current operator, next operator, and tuple to match I/O - self.add_flow(study_loader_op, series_selector_op, {("dicom_study_list", "dicom_study_list")}) - self.add_flow( - series_selector_op, series_to_vol_op, {("study_selected_series_list", "study_selected_series_list")} - ) - self.add_flow(series_to_vol_op, nnunet_seg_op, {("image", "image")}) - - # note below the dicom_seg_writer, dicom_sr_writer, and dicom_sc_writer each require two inputs, - # each coming from a source operator - - # DICOM SEG - self.add_flow( - series_selector_op, dicom_seg_writer, {("study_selected_series_list", "study_selected_series_list")} - ) - self.add_flow(nnunet_seg_op, dicom_seg_writer, {("seg_image", "seg_image")}) - - # DICOM SR - self.add_flow( - 
series_selector_op, dicom_sr_writer, {("study_selected_series_list", "study_selected_series_list")} - ) - self.add_flow(nnunet_seg_op, dicom_sr_writer, {("result_text", "text")}) - - # DICOM SC - self.add_flow( - series_selector_op, dicom_sc_writer, {("study_selected_series_list", "study_selected_series_list")} - ) - self.add_flow(nnunet_seg_op, dicom_sc_writer, {("dicom_sc_dir", "dicom_sc_dir")}) - - logging.info(f"End {self.compose.__name__}") - - -# series selection rule in JSON, which selects for Axial T2 MR series: -# StudyDescription (Type 3): matches any value -# Modality (Type 1): matches "MR" value (case-insensitive); filters out non-MR modalities -# ImageOrientationPatient (Type 1): matches Axial orientations; filters out Sagittal and Coronal orientations -# MRAcquisitionType (Type 2): matches "2D" value (case-insensitive); filters out 3D acquisitions -# RepetitionTime (Type 2C): matches values greater than 1200; filters for T2 acquisitions -# EchoTime (Type 2): matches values bewtween 75 and 100 (inclusive); filters out SSH series -# EchoTrainLength (Type 2): matches values less than 50; filters out SSH series -# FlipAngle (Type 3): matches values greater than 75; filters for T2 acquisitions -# all valid series will be selected; downstream operators only perform inference and write outputs for 1st selected series -# please see more detail in DICOMSeriesSelectorOperator - -Sample_Rules_Text = """ -""" - -# if executing application code using python interpreter: -if __name__ == "__main__": - # creates the app and test it standalone; when running is this mode, please note the following: - # -m , for model file path - # -i , for input DICOM MR series folder - # -o , for the output folder, default $PWD/output - # e.g. - # monai-deploy exec app.py -i input -m model/ls_swinunetr_FT.pt - # - logging.info(f"Begin {__name__}") - UTEAirwayNNUnetApp().run() - logging.info(f"End {__name__}") +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
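+
+# Example invocations (usage sketch; paths are placeholders, see the __main__
+# block at the end of this file for the supported options):
+#   python my_app/app.py -i <input_dicom_dir> -o <output_dir> -m <model_dir>
+#   monai-deploy exec my_app/app.py -i <input_dicom_dir> -m <model_dir>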
+
+import logging
+from pathlib import Path
+
+# custom DICOMSCWriterOperator (Secondary Capture)
+from dicom_sc_writer_operator import DICOMSCWriterOperator
+
+# custom DICOMSeriesSelectorOperator
+from dicom_series_selector_operator import DICOMSeriesSelectorOperator
+
+# custom inference operator
+from nnunet_seg_operator import NNUnetSegOperator
+
+# required for setting SegmentDescription attributes
+# direct import as this is not part of App SDK package
+from pydicom.sr.codedict import codes
+
+from monai.deploy.conditions import CountCondition
+from monai.deploy.core import Application
+from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator
+from monai.deploy.operators.dicom_seg_writer_operator import DICOMSegmentationWriterOperator, SegmentDescription
+from monai.deploy.operators.dicom_series_to_volume_operator import DICOMSeriesToVolumeOperator
+from monai.deploy.operators.dicom_text_sr_writer_operator import DICOMTextSRWriterOperator, EquipmentInfo, ModelInfo
+
+
+# inherit new Application class instance, UTEAirwayNNUnetApp, from MONAI Application base class
+# base class provides support for chaining up operators and executing application
+class UTEAirwayNNUnetApp(Application):
+    """Demonstrates inference with nnU-Net ensemble models for airway segmentation.
+
+    This application loads a set of DICOM instances, selects the appropriate series, converts the series to
+    a 3D volume image, performs inference with the NNUnetSegOperator, including pre-processing
+    and post-processing, saves a DICOM SEG (airway contour), a DICOM Secondary Capture (airway contour overlay),
+    and a DICOM SR (airway volume).
+
+    Pertinent MONAI Bundle:
+    This MAP is designed to work with a MONAI bundle compatible with nnU-Net.
+    """
+
+    def __init__(self, *args, **kwargs):
+        """Creates an application instance."""
+        self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__))
+        super().__init__(*args, **kwargs)
+
+    def run(self, *args, **kwargs):
+        # this method calls the base class to run; can be omitted if simply calling through
+        self._logger.info(f"Begin {self.run.__name__}")
+        super().run(*args, **kwargs)
+        self._logger.info(f"End {self.run.__name__}")
+
+    # use compose method to instantiate operators and connect them to form a Directed Acyclic Graph (DAG)
+    def compose(self):
+        """Creates the app specific operators and chains them up in the processing DAG."""
+
+        logging.info(f"Begin {self.compose.__name__}")
+
+        # use command-line options over environment variables to init context
+        app_context = Application.init_app_context(self.argv)
+        app_input_path = Path(app_context.input_path)
+        app_output_path = Path(app_context.output_path)
+        model_path = Path(app_context.model_path)
+
+        # Temporary bug fix for MAP execution where model path copy is messed up - needs a fix in the app-sdk package function
+        # Check if the model_path has a subfolder named 'models' and set model_path to that subfolder if it exists
+        models_subfolder = model_path / "models"
+        if models_subfolder.exists() and models_subfolder.is_dir():
+            self._logger.info(f"Found 'models' subfolder in {model_path}. Setting model_path to {models_subfolder}")
+            model_path = models_subfolder
+
+        # create the custom operator(s) as well as SDK built-in operator(s)
+        # DICOM Data Loader op
+        study_loader_op = DICOMDataLoaderOperator(
+            self, CountCondition(self, 1), input_folder=app_input_path, name="study_loader_op"
+        )
+
+        # custom DICOM Series Selector op
+        # all_matched and sort_by_sop_instance_count = True; want all series that meet the selection criteria
+        # to be matched, and SOP sorting
+        series_selector_op = DICOMSeriesSelectorOperator(
+            self, rules=Sample_Rules_Text, all_matched=True, sort_by_sop_instance_count=True, name="series_selector_op"
+        )
+
+        # DICOM Series to Volume op
+        series_to_vol_op = DICOMSeriesToVolumeOperator(self, name="series_to_vol_op")
+
+        # custom inference op
+        # output_labels specifies which of the organ segmentations are desired in the DICOM SEG, DICOM SC, and DICOM SR outputs
+        # 1 = airway
+        output_labels = [1]
+        nnunet_seg_op = NNUnetSegOperator(
+            self,
+            app_context=app_context,
+            model_path=model_path,
+            output_folder=app_output_path,
+            output_labels=output_labels,
+            name="nnunet_seg_op",
+        )
+
+        # create DICOM Seg writer providing the required segment description for each segment with
+        # the actual algorithm and the pertinent organ/tissue; the segment_label, algorithm_name,
+        # and algorithm_version are of DICOM VR LO type, limited to 64 chars
+        # https://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
+
+        # general algorithm information
+        _algorithm_name = "UTE_nnunet_airway"
+        _algorithm_family = codes.DCM.ArtificialIntelligence
+        _algorithm_version = "1.0.0"
+
+        segment_descriptions = [
+            SegmentDescription(
+                segment_label="Airway",
+                segmented_property_category=codes.SCT.BodyStructure,
+                segmented_property_type=codes.SCT.TracheaAndBronchus,
+                algorithm_name=_algorithm_name,
+                algorithm_family=_algorithm_family,
+                algorithm_version=_algorithm_version,
+            ),
+        ]
+
+        # model info is algorithm information
+        my_model_info = ModelInfo(
+            creator="UTE",  # institution name
+            name=_algorithm_name,  # algorithm name
+            version=_algorithm_version,  # algorithm version
+            uid="1.0.0",  # MAP version
+        )
+
+        # equipment info is MONAI Deploy App SDK information
+        my_equipment = EquipmentInfo(
+            manufacturer="The MONAI Consortium",
+            manufacturer_model="MONAI Deploy App SDK",
+            software_version_number="3.0.0",  # MONAI Deploy App SDK version
+        )
+
+        # custom tags - add AlgorithmName for monitoring purposes
+        custom_tags_seg = {
+            "SeriesDescription": "AI Generated DICOM SEG; Not for Clinical Use.",
+            "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}",
+        }
+        custom_tags_sr = {
+            "SeriesDescription": "AI Generated DICOM SR; Not for Clinical Use.",
+            "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}",
+        }
+        custom_tags_sc = {
+            "SeriesDescription": "AI Generated DICOM Secondary Capture; Not for Clinical Use.",
+            "AlgorithmName": f"{my_model_info.name}:{my_model_info.version}:{my_model_info.uid}",
+        }
+
+        # DICOM SEG Writer op writes content from segment_descriptions to output DICOM images as DICOM tags
+        dicom_seg_writer = DICOMSegmentationWriterOperator(
+            self,
+            segment_descriptions=segment_descriptions,
+            model_info=my_model_info,
+            custom_tags=custom_tags_seg,
+            # store DICOM SEG in SEG subdirectory; necessary for routing in CCHMC MDE workflow definition
+            output_folder=app_output_path / "SEG",
+            # omit_empty_frames is a default parameter (type bool) of DICOMSegmentationWriterOperator
+            # dictates whether or not to omit frames that contain no segmented pixels from the output segmentation
+            # default value is True; changed to False to ensure input and output DICOM series #'s match
+            omit_empty_frames=False,
+            name="dicom_seg_writer",
+        )
+
+        # DICOM SR Writer op
+        dicom_sr_writer = DICOMTextSRWriterOperator(
+            self,
+            # copy_tags is a default parameter (type bool) of DICOMTextSRWriterOperator; default value is True
+            # dictates whether or not to copy DICOM attributes from the selected DICOM series
+            # changed to True to copy DICOM attributes so DICOM SR has same Study UID
+            copy_tags=True,
+            model_info=my_model_info,
+            equipment_info=my_equipment,
+            custom_tags=custom_tags_sr,
+            # store DICOM SR in SR subdirectory; necessary for routing in CCHMC MDE workflow definition
+            output_folder=app_output_path / "SR",
+        )
+
+        # custom DICOM SC Writer op
+        dicom_sc_writer = DICOMSCWriterOperator(
+            self,
+            model_info=my_model_info,
+            equipment_info=my_equipment,
+            custom_tags=custom_tags_sc,
+            # store DICOM SC in SC subdirectory; necessary for routing in CCHMC MDE workflow definition
+            output_folder=app_output_path / "SC",
+        )
+
+        # create the processing pipeline, by specifying the source and destination operators, and
+        # ensuring the output from the former matches the input of the latter, in both name and type
+        # instantiate and connect operators using self.add_flow(); specify current operator, next operator, and tuple to match I/O
+        self.add_flow(study_loader_op, series_selector_op, {("dicom_study_list", "dicom_study_list")})
+        self.add_flow(
+            series_selector_op, series_to_vol_op, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(series_to_vol_op, nnunet_seg_op, {("image", "image")})
+
+        # note below the dicom_seg_writer, dicom_sr_writer, and dicom_sc_writer each require two inputs,
+        # each coming from a source operator
+
+        # DICOM SEG
+        self.add_flow(
+            series_selector_op, dicom_seg_writer, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(nnunet_seg_op, dicom_seg_writer, {("seg_image", "seg_image")})
+
+        # DICOM SR
+        self.add_flow(
+            series_selector_op, dicom_sr_writer, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(nnunet_seg_op, dicom_sr_writer, {("result_text", "text")})
+
+        # DICOM SC
+        self.add_flow(
+            series_selector_op, dicom_sc_writer, {("study_selected_series_list", "study_selected_series_list")}
+        )
+        self.add_flow(nnunet_seg_op, dicom_sc_writer, {("dicom_sc_dir", "dicom_sc_dir")})
+
+        logging.info(f"End {self.compose.__name__}")
+
+
+# series selection rule in JSON, which selects for Axial T2 MR series:
+# StudyDescription (Type 3): matches any value
+# Modality (Type 1): matches "MR" value (case-insensitive); filters out non-MR modalities
+# ImageOrientationPatient (Type 1): matches Axial orientations; filters out Sagittal and Coronal orientations
+# MRAcquisitionType (Type 2): matches "2D" value (case-insensitive); filters out 3D acquisitions
+# RepetitionTime (Type 2C): matches values greater than 1200; filters for T2 acquisitions
+# EchoTime (Type 2): matches values between 75 and 100 (inclusive); filters out SSH series
+# EchoTrainLength (Type 2): matches values less than 50; filters out SSH series
+# FlipAngle (Type 3): matches values greater than 75; filters for T2 acquisitions
+# all valid series will be selected; downstream operators only perform inference and write outputs for 1st selected series
+# please see more detail in DICOMSeriesSelectorOperator
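+
+# A hypothetical rule implementing the criteria above could look like the following
+# (sketch only; the shipped Sample_Rules_Text below is intentionally left blank, in
+# which case all series are selected; condition syntax per DICOMSeriesSelectorOperator):
+#
+# {
+#     "selections": [
+#         {
+#             "name": "Axial T2 MR Series",
+#             "conditions": {
+#                 "StudyDescription": "(.*?)",
+#                 "Modality": "(?i)MR",
+#                 "ImageOrientationPatient": "Axial",
+#                 "MRAcquisitionType": "(?i)2D",
+#                 "RepetitionTime": [1200, ">"],
+#                 "EchoTime": [75, 100],
+#                 "EchoTrainLength": [0, 50],
+#                 "FlipAngle": [75, ">"]
+#             }
+#         }
+#     ]
+# }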
+
+Sample_Rules_Text = """
+"""
+
+# if executing application code using python interpreter:
+if __name__ == "__main__":
+    # creates the app and tests it standalone; when running in this mode, please note the following:
+    # -m <model file>, for model file path
+    # -i <input folder>, for input DICOM MR series folder
+    # -o <output folder>, for the output folder, default $PWD/output
+    # e.g.
+    #     monai-deploy exec app.py -i input -m model/ls_swinunetr_FT.pt
+    #
+    logging.info(f"Begin {__name__}")
+    UTEAirwayNNUnetApp().run()
+    logging.info(f"End {__name__}")
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.yaml b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.yaml
index bd94c326..7833d614 100644
--- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.yaml
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/app.yaml
@@ -1,34 +1,34 @@
-# Copyright 2021-2025 MONAI Consortium
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-
-# app.yaml is a configuration file that specifies MAP settings
-# used by MONAI App SDK to understand how to run our app in a MAP and what resources it needs
-
-# specifies high-level information about our app
-application:
-  title: MONAI Deploy App Package - CCHMC Pediatric Airway Segmentation using nnUNet
-  description: This application segments the airway from a MRI scan using a nnUNet model trained
-  version: 0.0.1
-  inputFormats: ["file"]
-  outputFormats: ["file"]
-
-# specifies the resources our app needs to run
-# per MONAI docs (https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/developing_with_sdk/executing_packaged_app_locally.html)
-# MAR does not validate all of the resource requirements embedded in the MAP to ensure they are met in host system
-# e.g, MAR will throw an error if gpu requirement is not met on host system; however, gpuMemory parameter doesn't appear to be validated
-resources:
-  cpu: 4
-  gpu: 1
-  memory: 4Gi
-  # during MAP execution, for an input DICOM Series of 72 instances, GPU usage peaks at just under 8100 MiB ~= 8.5 GB ~= 7.9 Gi
-  gpuMemory: 8Gi
+# Copyright 2021-2025 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
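+
+# Example packaging invocation (sketch; exact flags depend on the installed
+# MONAI Deploy App SDK version and the target platform):
+#   monai-deploy package my_app -c my_app/app.yaml -t cchmc_nnunet_airway:0.0.1 --platform x64-workstation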
+---
+
+# app.yaml is a configuration file that specifies MAP settings
+# used by MONAI App SDK to understand how to run our app in a MAP and what resources it needs
+
+# specifies high-level information about our app
+application:
+  title: MONAI Deploy App Package - CCHMC Pediatric Airway Segmentation using nnUNet
+  description: This application segments the airway from an MRI scan using a trained nnUNet model
+  version: 0.0.1
+  inputFormats: ["file"]
+  outputFormats: ["file"]
+
+# specifies the resources our app needs to run
+# per MONAI docs (https://docs.monai.io/projects/monai-deploy-app-sdk/en/latest/developing_with_sdk/executing_packaged_app_locally.html)
+# MAR does not validate all of the resource requirements embedded in the MAP to ensure they are met in the host system
+# e.g., MAR will throw an error if the gpu requirement is not met on the host system; however, the gpuMemory parameter doesn't appear to be validated
+resources:
+  cpu: 4
+  gpu: 1
+  memory: 4Gi
+  # during MAP execution, for an input DICOM Series of 72 instances, GPU usage peaks at just under 8100 MiB ~= 8.5 GB ~= 7.9 Gi
+  gpuMemory: 8Gi
\ No newline at end of file
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_sc_writer_operator.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_sc_writer_operator.py
index ce37f327..9479485e 100644
--- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_sc_writer_operator.py
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_sc_writer_operator.py
@@ -1,253 +1,253 @@
-# Copyright 2021-2025 MONAI Consortium
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import os
-from pathlib import Path
-from typing import Dict, Optional, Union
-
-import pydicom
-
-from monai.deploy.core import Fragment, Operator, OperatorSpec
-from monai.deploy.core.domain.dicom_series import DICOMSeries
-from monai.deploy.core.domain.dicom_series_selection import StudySelectedSeries
-from monai.deploy.operators.dicom_utils import EquipmentInfo, ModelInfo, write_common_modules
-from monai.deploy.utils.importutil import optional_import
-from monai.deploy.utils.version import get_sdk_semver
-
-dcmread, _ = optional_import("pydicom", name="dcmread")
-dcmwrite, _ = optional_import("pydicom.filewriter", name="dcmwrite")
-generate_uid, _ = optional_import("pydicom.uid", name="generate_uid")
-ImplicitVRLittleEndian, _ = optional_import("pydicom.uid", name="ImplicitVRLittleEndian")
-Dataset, _ = optional_import("pydicom.dataset", name="Dataset")
-FileDataset, _ = optional_import("pydicom.dataset", name="FileDataset")
-Sequence, _ = optional_import("pydicom.sequence", name="Sequence")
-
-
-class DICOMSCWriterOperator(Operator):
-    """Class to write a new DICOM Secondary Capture (DICOM SC) instance with source DICOM Series metadata included.
-
-    Named inputs:
-        dicom_sc_dir: file path of temporary DICOM SC (w/o source DICOM Series metadata).
-        study_selected_series_list: DICOM Series for copying metadata from.
-
-    Named output:
-        None.
- - File output: - New, updated DICOM SC file (with source DICOM Series metadata) in the provided output folder. - """ - - # file extension for the generated DICOM Part 10 file - DCM_EXTENSION = ".dcm" - # the default output folder for saving the generated DICOM instance file - # DEFAULT_OUTPUT_FOLDER = Path(os.path.join(os.path.dirname(__file__))) / "output" - DEFAULT_OUTPUT_FOLDER = Path.cwd() / "output" - - def __init__( - self, - fragment: Fragment, - *args, - output_folder: Union[str, Path], - model_info: ModelInfo, - equipment_info: Optional[EquipmentInfo] = None, - custom_tags: Optional[Dict[str, str]] = None, - **kwargs, - ): - """Class to write a new DICOM Secondary Capture (DICOM SC) instance with source DICOM Series metadata. - - Args: - output_folder (str or Path): The folder for saving the generated DICOM SC instance file. - model_info (ModelInfo): Object encapsulating model creator, name, version and UID. - equipment_info (EquipmentInfo, optional): Object encapsulating info for DICOM Equipment Module. - Defaults to None. - custom_tags (Dict[str, str], optional): Dictionary for setting custom DICOM tags using Keywords and str values only. - Defaults to None. - - Raises: - ValueError: If result cannot be found either in memory or from file. - """ - - self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") - - # need to init the output folder until the execution context supports dynamic FS path - # not trying to create the folder to avoid exception on init - self.output_folder = Path(output_folder) if output_folder else DICOMSCWriterOperator.DEFAULT_OUTPUT_FOLDER - self.input_name_sc_dir = "dicom_sc_dir" - self.input_name_study_series = "study_selected_series_list" - - # for copying DICOM attributes from a provided DICOMSeries - # required input for write_common_modules; will always be True for this implementation - self.copy_tags = True - - self.model_info = model_info if model_info else ModelInfo() - self.equipment_info = equipment_info if equipment_info else EquipmentInfo() - self.custom_tags = custom_tags - - # set own Modality and SOP Class UID - # Standard SOP Classes: https://dicom.nema.org/dicom/2013/output/chtml/part04/sect_B.5.html - # Modality, e.g., - # "OT" for PDF - # "SR" for Structured Report. - # Media Storage SOP Class UID, e.g., - # "1.2.840.10008.5.1.4.1.1.88.11" for Basic Text SR Storage - # "1.2.840.10008.5.1.4.1.1.104.1" for Encapsulated PDF Storage, - # "1.2.840.10008.5.1.4.1.1.88.34" for Comprehensive 3D SR IOD - # "1.2.840.10008.5.1.4.1.1.66.4" for Segmentation Storage - self.modality_type = "OT" # OT Modality for Secondary Capture - self.sop_class_uid = ( - "1.2.840.10008.5.1.4.1.1.7.4" # SOP Class UID for Multi-frame True Color Secondary Capture Image Storage - ) - # custom OverlayImageLabeld post-processing transform creates an RBG overlay - - # equipment version may be different from contributing equipment version - try: - self.software_version_number = get_sdk_semver() # SDK Version - except Exception: - self.software_version_number = "" - self.operators_name = f"AI Algorithm {self.model_info.name}" - - super().__init__(fragment, *args, **kwargs) - - def setup(self, spec: OperatorSpec): - """Set up the named input(s), and output(s) if applicable. - - This operator does not have an output for the next operator, rather file output only. - - Args: - spec (OperatorSpec): The Operator specification for inputs and outputs etc. 
- """ - - spec.input(self.input_name_sc_dir) - spec.input(self.input_name_study_series) - - def compute(self, op_input, op_output, context): - """Performs computation for this operator and handles I/O. - - For now, only a single result content is supported, which could be in memory or an accessible file. - The DICOM Series used during inference is required (and copy_tags is hardcoded to True). - - When there are multiple selected series in the input, the first series' containing study will - be used for retrieving DICOM Study module attributes, e.g. StudyInstanceUID. - - Raises: - NotADirectoryError: When temporary DICOM SC path is not a directory. - FileNotFoundError: When result object not in the input, and result file not found either. - ValueError: Content object and file path not in the inputs, or no DICOM series provided. - IOError: If the input content is blank. - """ - - # receive the temporary DICOM SC file path and study selected series list - dicom_sc_dir = Path(op_input.receive(self.input_name_sc_dir)) - if not dicom_sc_dir: - raise IOError("Temporary DICOM SC path is read but blank.") - if not dicom_sc_dir.is_dir(): - raise NotADirectoryError(f"Provided temporary DICOM SC path is not a directory: {dicom_sc_dir}") - self._logger.info(f"Received temporary DICOM SC path: {dicom_sc_dir}") - - study_selected_series_list = op_input.receive(self.input_name_study_series) - if not study_selected_series_list or len(study_selected_series_list) < 1: - raise ValueError("Missing input, list of 'StudySelectedSeries'.") - - # retrieve the DICOM Series used during inference in order to grab appropriate study/series level tags - # this will be the 1st Series in study_selected_series_list - dicom_series = None - for study_selected_series in study_selected_series_list: - if not isinstance(study_selected_series, StudySelectedSeries): - raise ValueError(f"Element in input is not expected type, {StudySelectedSeries}.") - selected_series = study_selected_series.selected_series[0] - dicom_series = selected_series.series - break - - # log basic DICOM metadata for the retrieved DICOM Series - self._logger.debug(f"Dicom Series: {dicom_series}") - - # the output folder should come from the execution context when it is supported - self.output_folder.mkdir(parents=True, exist_ok=True) - - # write the new DICOM SC instance - self.write(dicom_sc_dir, dicom_series, self.output_folder) - - def write(self, dicom_sc_dir, dicom_series: DICOMSeries, output_dir: Path): - """Writes a new, updated DICOM SC instance and deletes the temporary DICOM SC instance. - The new, updated DICOM SC instance is the temporary DICOM SC instance with source - DICOM Series metadata copied. - - Args: - dicom_sc_dir: temporary DICOM SC file path. - dicom_series (DICOMSeries): DICOMSeries object encapsulating the original series. - - Returns: - None - - File output: - New, updated DICOM SC file (with source DICOM Series metadata) in the provided output folder. 
- """ - - if not isinstance(output_dir, Path): - raise ValueError("output_dir is not a valid Path.") - - output_dir.mkdir(parents=True, exist_ok=True) # just in case - - # find the temporary DICOM SC file in the directory; there should only be one .dcm file present - dicom_files = list(dicom_sc_dir.glob("*.dcm")) - dicom_sc_file = dicom_files[0] - - # load the temporary DICOM SC file using pydicom - dicom_sc_dataset = pydicom.dcmread(dicom_sc_file) - self._logger.info(f"Loaded temporary DICOM SC file: {dicom_sc_file}") - - # use write_common_modules to copy metadata from dicom_series - # this will copy metadata and return an updated Dataset - ds = write_common_modules( - dicom_series, - self.copy_tags, # always True for this implementation - self.modality_type, - self.sop_class_uid, - self.model_info, - self.equipment_info, - ) - - # Secondary Capture specific tags - ds.ImageType = ["DERIVED", "SECONDARY"] - - # for now, only allow str Keywords and str value - if self.custom_tags: - for k, v in self.custom_tags.items(): - if isinstance(k, str) and isinstance(v, str): - try: - ds.update({k: v}) - except Exception as ex: - # best effort for now - logging.warning(f"Tag {k} was not written, due to {ex}") - - # merge the copied metadata into the loaded temporary DICOM SC file (dicom_sc_dataset) - for tag, value in ds.items(): - dicom_sc_dataset[tag] = value - - # save the updated DICOM SC file to the output folder - # instance file name is the same as the new SOP instance UID - output_file_path = self.output_folder.joinpath( - f"{dicom_sc_dataset.SOPInstanceUID}{DICOMSCWriterOperator.DCM_EXTENSION}" - ) - dicom_sc_dataset.save_as(output_file_path) - self._logger.info(f"Saved updated DICOM SC file at: {output_file_path}") - - # remove the temporary DICOM SC file - os.remove(dicom_sc_file) - self._logger.info(f"Removed temporary DICOM SC file: {dicom_sc_file}") - - # check if the temp directory is empty, then delete it - if not any(dicom_sc_dir.iterdir()): - os.rmdir(dicom_sc_dir) - self._logger.info(f"Removed temporary directory: {dicom_sc_dir}") - else: - self._logger.warning(f"Temporary directory {dicom_sc_dir} is not empty, skipping removal.") +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import os +from pathlib import Path +from typing import Dict, Optional, Union + +import pydicom + +from monai.deploy.core import Fragment, Operator, OperatorSpec +from monai.deploy.core.domain.dicom_series import DICOMSeries +from monai.deploy.core.domain.dicom_series_selection import StudySelectedSeries +from monai.deploy.operators.dicom_utils import EquipmentInfo, ModelInfo, write_common_modules +from monai.deploy.utils.importutil import optional_import +from monai.deploy.utils.version import get_sdk_semver + +dcmread, _ = optional_import("pydicom", name="dcmread") +dcmwrite, _ = optional_import("pydicom.filewriter", name="dcmwrite") +generate_uid, _ = optional_import("pydicom.uid", name="generate_uid") +ImplicitVRLittleEndian, _ = optional_import("pydicom.uid", name="ImplicitVRLittleEndian") +Dataset, _ = optional_import("pydicom.dataset", name="Dataset") +FileDataset, _ = optional_import("pydicom.dataset", name="FileDataset") +Sequence, _ = optional_import("pydicom.sequence", name="Sequence") + + +class DICOMSCWriterOperator(Operator): + """Class to write a new DICOM Secondary Capture (DICOM SC) instance with source DICOM Series metadata included. + + Named inputs: + dicom_sc_dir: file path of temporary DICOM SC (w/o source DICOM Series metadata). + study_selected_series_list: DICOM Series for copying metadata from. + + Named output: + None. + + File output: + New, updated DICOM SC file (with source DICOM Series metadata) in the provided output folder. + """ + + # file extension for the generated DICOM Part 10 file + DCM_EXTENSION = ".dcm" + # the default output folder for saving the generated DICOM instance file + # DEFAULT_OUTPUT_FOLDER = Path(os.path.join(os.path.dirname(__file__))) / "output" + DEFAULT_OUTPUT_FOLDER = Path.cwd() / "output" + + def __init__( + self, + fragment: Fragment, + *args, + output_folder: Union[str, Path], + model_info: ModelInfo, + equipment_info: Optional[EquipmentInfo] = None, + custom_tags: Optional[Dict[str, str]] = None, + **kwargs, + ): + """Class to write a new DICOM Secondary Capture (DICOM SC) instance with source DICOM Series metadata. + + Args: + output_folder (str or Path): The folder for saving the generated DICOM SC instance file. + model_info (ModelInfo): Object encapsulating model creator, name, version and UID. + equipment_info (EquipmentInfo, optional): Object encapsulating info for DICOM Equipment Module. + Defaults to None. + custom_tags (Dict[str, str], optional): Dictionary for setting custom DICOM tags using Keywords and str values only. + Defaults to None. + + Raises: + ValueError: If result cannot be found either in memory or from file. 
+ """ + + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + + # need to init the output folder until the execution context supports dynamic FS path + # not trying to create the folder to avoid exception on init + self.output_folder = Path(output_folder) if output_folder else DICOMSCWriterOperator.DEFAULT_OUTPUT_FOLDER + self.input_name_sc_dir = "dicom_sc_dir" + self.input_name_study_series = "study_selected_series_list" + + # for copying DICOM attributes from a provided DICOMSeries + # required input for write_common_modules; will always be True for this implementation + self.copy_tags = True + + self.model_info = model_info if model_info else ModelInfo() + self.equipment_info = equipment_info if equipment_info else EquipmentInfo() + self.custom_tags = custom_tags + + # set own Modality and SOP Class UID + # Standard SOP Classes: https://dicom.nema.org/dicom/2013/output/chtml/part04/sect_B.5.html + # Modality, e.g., + # "OT" for PDF + # "SR" for Structured Report. + # Media Storage SOP Class UID, e.g., + # "1.2.840.10008.5.1.4.1.1.88.11" for Basic Text SR Storage + # "1.2.840.10008.5.1.4.1.1.104.1" for Encapsulated PDF Storage, + # "1.2.840.10008.5.1.4.1.1.88.34" for Comprehensive 3D SR IOD + # "1.2.840.10008.5.1.4.1.1.66.4" for Segmentation Storage + self.modality_type = "OT" # OT Modality for Secondary Capture + self.sop_class_uid = ( + "1.2.840.10008.5.1.4.1.1.7.4" # SOP Class UID for Multi-frame True Color Secondary Capture Image Storage + ) + # custom OverlayImageLabeld post-processing transform creates an RBG overlay + + # equipment version may be different from contributing equipment version + try: + self.software_version_number = get_sdk_semver() # SDK Version + except Exception: + self.software_version_number = "" + self.operators_name = f"AI Algorithm {self.model_info.name}" + + super().__init__(fragment, *args, **kwargs) + + def setup(self, spec: OperatorSpec): + """Set up the named input(s), and output(s) if applicable. + + This operator does not have an output for the next operator, rather file output only. + + Args: + spec (OperatorSpec): The Operator specification for inputs and outputs etc. + """ + + spec.input(self.input_name_sc_dir) + spec.input(self.input_name_study_series) + + def compute(self, op_input, op_output, context): + """Performs computation for this operator and handles I/O. + + For now, only a single result content is supported, which could be in memory or an accessible file. + The DICOM Series used during inference is required (and copy_tags is hardcoded to True). + + When there are multiple selected series in the input, the first series' containing study will + be used for retrieving DICOM Study module attributes, e.g. StudyInstanceUID. + + Raises: + NotADirectoryError: When temporary DICOM SC path is not a directory. + FileNotFoundError: When result object not in the input, and result file not found either. + ValueError: Content object and file path not in the inputs, or no DICOM series provided. + IOError: If the input content is blank. 
+ """ + + # receive the temporary DICOM SC file path and study selected series list + dicom_sc_dir = Path(op_input.receive(self.input_name_sc_dir)) + if not dicom_sc_dir: + raise IOError("Temporary DICOM SC path is read but blank.") + if not dicom_sc_dir.is_dir(): + raise NotADirectoryError(f"Provided temporary DICOM SC path is not a directory: {dicom_sc_dir}") + self._logger.info(f"Received temporary DICOM SC path: {dicom_sc_dir}") + + study_selected_series_list = op_input.receive(self.input_name_study_series) + if not study_selected_series_list or len(study_selected_series_list) < 1: + raise ValueError("Missing input, list of 'StudySelectedSeries'.") + + # retrieve the DICOM Series used during inference in order to grab appropriate study/series level tags + # this will be the 1st Series in study_selected_series_list + dicom_series = None + for study_selected_series in study_selected_series_list: + if not isinstance(study_selected_series, StudySelectedSeries): + raise ValueError(f"Element in input is not expected type, {StudySelectedSeries}.") + selected_series = study_selected_series.selected_series[0] + dicom_series = selected_series.series + break + + # log basic DICOM metadata for the retrieved DICOM Series + self._logger.debug(f"Dicom Series: {dicom_series}") + + # the output folder should come from the execution context when it is supported + self.output_folder.mkdir(parents=True, exist_ok=True) + + # write the new DICOM SC instance + self.write(dicom_sc_dir, dicom_series, self.output_folder) + + def write(self, dicom_sc_dir, dicom_series: DICOMSeries, output_dir: Path): + """Writes a new, updated DICOM SC instance and deletes the temporary DICOM SC instance. + The new, updated DICOM SC instance is the temporary DICOM SC instance with source + DICOM Series metadata copied. + + Args: + dicom_sc_dir: temporary DICOM SC file path. + dicom_series (DICOMSeries): DICOMSeries object encapsulating the original series. + + Returns: + None + + File output: + New, updated DICOM SC file (with source DICOM Series metadata) in the provided output folder. 
+ """ + + if not isinstance(output_dir, Path): + raise ValueError("output_dir is not a valid Path.") + + output_dir.mkdir(parents=True, exist_ok=True) # just in case + + # find the temporary DICOM SC file in the directory; there should only be one .dcm file present + dicom_files = list(dicom_sc_dir.glob("*.dcm")) + dicom_sc_file = dicom_files[0] + + # load the temporary DICOM SC file using pydicom + dicom_sc_dataset = pydicom.dcmread(dicom_sc_file) + self._logger.info(f"Loaded temporary DICOM SC file: {dicom_sc_file}") + + # use write_common_modules to copy metadata from dicom_series + # this will copy metadata and return an updated Dataset + ds = write_common_modules( + dicom_series, + self.copy_tags, # always True for this implementation + self.modality_type, + self.sop_class_uid, + self.model_info, + self.equipment_info, + ) + + # Secondary Capture specific tags + ds.ImageType = ["DERIVED", "SECONDARY"] + + # for now, only allow str Keywords and str value + if self.custom_tags: + for k, v in self.custom_tags.items(): + if isinstance(k, str) and isinstance(v, str): + try: + ds.update({k: v}) + except Exception as ex: + # best effort for now + logging.warning(f"Tag {k} was not written, due to {ex}") + + # merge the copied metadata into the loaded temporary DICOM SC file (dicom_sc_dataset) + for tag, value in ds.items(): + dicom_sc_dataset[tag] = value + + # save the updated DICOM SC file to the output folder + # instance file name is the same as the new SOP instance UID + output_file_path = self.output_folder.joinpath( + f"{dicom_sc_dataset.SOPInstanceUID}{DICOMSCWriterOperator.DCM_EXTENSION}" + ) + dicom_sc_dataset.save_as(output_file_path) + self._logger.info(f"Saved updated DICOM SC file at: {output_file_path}") + + # remove the temporary DICOM SC file + os.remove(dicom_sc_file) + self._logger.info(f"Removed temporary DICOM SC file: {dicom_sc_file}") + + # check if the temp directory is empty, then delete it + if not any(dicom_sc_dir.iterdir()): + os.rmdir(dicom_sc_dir) + self._logger.info(f"Removed temporary directory: {dicom_sc_dir}") + else: + self._logger.warning(f"Temporary directory {dicom_sc_dir} is not empty, skipping removal.") diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_series_selector_operator.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_series_selector_operator.py index ced61ea9..9249b4d9 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_series_selector_operator.py +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/dicom_series_selector_operator.py @@ -1,629 +1,629 @@ -# Copyright 2021-2025 MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -import numbers -import re -from json import loads as json_loads -from typing import List - -import numpy as np - -from monai.deploy.core import ConditionType, Fragment, Operator, OperatorSpec -from monai.deploy.core.domain.dicom_series import DICOMSeries -from monai.deploy.core.domain.dicom_series_selection import SelectedSeries, StudySelectedSeries -from monai.deploy.core.domain.dicom_study import DICOMStudy - - -class DICOMSeriesSelectorOperator(Operator): - """This operator selects a list of DICOM Series in a DICOM Study for a given set of selection rules. - - Named input: - dicom_study_list: A list of DICOMStudy objects. - - Named output: - study_selected_series_list: A list of StudySelectedSeries objects. Downstream receiver optional. - - This class can be considered a base class, and a derived class can override the 'filter' function with - custom logic. - - In its default implementation, this class - 1. selects a series or all matched series within the scope of a study in a list of studies - 2. uses rules defined in JSON string, see below for details - 3. supports DICOM Study and Series module attribute matching - 4. supports multiple named selections, in the scope of each DICOM study - 5. outputs a list of StudySelectedSeries objects, as well as a flat list of SelectedSeries (to be deprecated) - - The selection rules are defined in JSON, - 1. attribute "selections" value is a list of selections - 2. each selection has a "name", and its "conditions" value is a list of matching criteria - 3. each condition uses the implicit equal operator; in addition, the following are supported: - - regex, relational, and range matching for float and int types - - regex matching for str type - - inclusion and exclusion matching for set type - - image orientation check for the ImageOrientationPatient tag - 4. DICOM attribute keywords are used, and only for those defined as DICOMStudy and DICOMSeries properties - - An example selection rules: - { - "selections": [ - { - "name": "CT Series 1", - "conditions": { - "StudyDescription": "(?i)^Spleen", - "Modality": "(?i)CT", - "SeriesDescription": "(?i)^No series description|(.*?)" - } - }, - { - "name": "CT Series 2", - "conditions": { - "Modality": "CT", - "BodyPartExamined": "Abdomen", - "SeriesDescription" : "Not to be matched. For illustration only." - } - }, - { - "name": "CT Series 3", - "conditions": { - "StudyDescription": "(.*?)", - "Modality": "(?i)CT", - "ImageType": ["PRIMARY", "ORIGINAL", "AXIAL"], - "SliceThickness": [3, 5] - } - }, - { - "name": "CT Series 4", - "conditions": { - "StudyDescription": "(.*?)", - "Modality": "(?i)CT", - "ImageOrientationPatient": "Axial", - "SliceThickness": [2, ">"] - } - }, - { - "name": "CT Series 5", - "conditions": { - "StudyDescription": "(.*?)", - "Modality": "(?i)CT", - "ImageType": ["PRIMARY", "!SECONDARY"] - } - } - ] - } - """ - - def __init__( - self, - fragment: Fragment, - *args, - rules: str = "", - all_matched: bool = False, - sort_by_sop_instance_count: bool = False, - **kwargs, - ) -> None: - """Instantiate an instance. - - Args: - fragment (Fragment): An instance of the Application class which is derived from Fragment. - rules (Text): Selection rules in JSON string. - all_matched (bool): Gets all matched series in a study. Defaults to False for first match only. - sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in - descending SOP instance count (i.e. 
the first Series in the returned List[StudySelectedSeries] will have the highest # - of DICOM images); Defaults to False for no sorting. - """ - - # Delay loading the rules as JSON string till compute time. - self._rules_json_str = rules if rules and rules.strip() else None - self._all_matched = all_matched # all_matched - self._sort_by_sop_instance_count = sort_by_sop_instance_count # sort_by_sop_instance_count - self.input_name_study_list = "dicom_study_list" - self.output_name_selected_series = "study_selected_series_list" - - super().__init__(fragment, *args, **kwargs) - - def setup(self, spec: OperatorSpec): - spec.input(self.input_name_study_list) - spec.output(self.output_name_selected_series).condition(ConditionType.NONE) # Receiver optional - - # Can use the config file to alter the selection rules per app run - # spec.param("selection_rules") - - def compute(self, op_input, op_output, context): - """Performs computation for this operator.""" - - dicom_study_list = op_input.receive(self.input_name_study_list) - selection_rules = self._load_rules() if self._rules_json_str else None - study_selected_series = self.filter( - selection_rules, dicom_study_list, self._all_matched, self._sort_by_sop_instance_count - ) - - # Log Series Description and Series Instance UID of the first selected DICOM Series (i.e. the one to be used for inference) - if study_selected_series and len(study_selected_series) > 0: - inference_study = study_selected_series[0] - if inference_study.selected_series and len(inference_study.selected_series) > 0: - inference_series = inference_study.selected_series[0].series - logging.info("Series Selection finalized") - logging.info( - f"Series Description of selected DICOM Series for inference: {inference_series.SeriesDescription}" - ) - logging.info( - f"Series Instance UID of selected DICOM Series for inference: {inference_series.SeriesInstanceUID}" - ) - - op_output.emit(study_selected_series, self.output_name_selected_series) - - def filter( - self, selection_rules, dicom_study_list, all_matched: bool = False, sort_by_sop_instance_count: bool = False - ) -> List[StudySelectedSeries]: - """Selects the series with the given matching rules. - - If rules object is None, all series will be returned with series instance UID as the selection name. - - Supported matching logic: - Float + Int: exact matching, relational matching, range matching, and regex matching - String: matches case insensitive, if fails then tries RegEx search - String array (set): inclusive and exclusive (via !) matching as subsets, case insensitive - ImageOrientationPatient tag: image orientation (Axial, Coronal, Sagittal) matching - - Args: - selection_rules (object): JSON object containing the matching rules. - dicom_study_list (list): A list of DICOMStudy objects. - all_matched (bool): Gets all matched series in a study. Defaults to False for first match only. - sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in - descending SOP instance count (i.e. the first Series in the returned List[StudySelectedSeries] will have the highest # - of DICOM images); Defaults to False for no sorting. - - Returns: - list: A list of objects of type StudySelectedSeries. - - Raises: - ValueError: If the selection_rules object does not contain "selections" attribute. 
- """ - - if not dicom_study_list or len(dicom_study_list) < 1: - return [] - - if not selection_rules: - # Return all series if no selection rules are supplied - logging.warn("No selection rules given; select all series.") - return self._select_all_series(dicom_study_list) - - selections = selection_rules.get("selections", None) # TODO type is not json now. - # If missing selections in the rules then it is an error. - if not selections: - raise ValueError('Expected "selections" not found in the rules.') - - study_selected_series_list = [] # List of StudySelectedSeries objects - - for study in dicom_study_list: - study_selected_series = StudySelectedSeries(study) - for selection in selections: - # Get the selection name. Blank name will be handled by the SelectedSeries - selection_name = selection.get("name", "").strip() - logging.info(f"Finding series for Selection named: {selection_name}") - - # Skip if no selection conditions are provided. - conditions = selection.get("conditions", None) - if not conditions: - continue - - # Select only the first series that matches the conditions, list of one - series_list = self._select_series(conditions, study, all_matched, sort_by_sop_instance_count) - if series_list and len(series_list) > 0: - for series in series_list: - selected_series = SelectedSeries(selection_name, series, None) # No Image obj yet. - study_selected_series.add_selected_series(selected_series) - - if len(study_selected_series.selected_series) > 0: - study_selected_series_list.append(study_selected_series) - - return study_selected_series_list - - def _load_rules(self): - return json_loads(self._rules_json_str) if self._rules_json_str else None - - def _select_all_series(self, dicom_study_list: List[DICOMStudy]) -> List[StudySelectedSeries]: - """Select all series in studies - - Returns: - list: list of StudySelectedSeries objects - """ - - study_selected_series_list = [] - for study in dicom_study_list: - logging.info(f"Working on study, instance UID: {study.StudyInstanceUID}") - study_selected_series = StudySelectedSeries(study) - for series in study.get_all_series(): - logging.info(f"Working on series, instance UID: {str(series.SeriesInstanceUID)}") - selected_series = SelectedSeries("", series, None) # No selection name or Image obj. - study_selected_series.add_selected_series(selected_series) - study_selected_series_list.append(study_selected_series) - return study_selected_series_list - - def _select_series( - self, attributes: dict, study: DICOMStudy, all_matched=False, sort_by_sop_instance_count=False - ) -> List[DICOMSeries]: - """Finds series whose attributes match the given attributes. - - Args: - attributes (dict): Dictionary of attributes for matching - all_matched (bool): Gets all matched series in a study. Defaults to False for first match only. - sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in - descending SOP instance count (i.e. the first Series in the returned List[StudySelectedSeries] will have the highest # - of DICOM images); Defaults to False for no sorting. - - Returns: - List of DICOMSeries. At most one element if all_matched is False. - - Raises: - NotImplementedError: If the value_to_match type is not supported for matching or unsupported PatientPosition value. - """ - assert isinstance(attributes, dict), '"attributes" must be a dict.' 
- - logging.info(f"Searching study, : {study.StudyInstanceUID}\n # of series: {len(study.get_all_series())}") - study_attr = self._get_instance_properties(study) - - found_series = [] - for series in study.get_all_series(): - logging.info(f"Working on series, instance UID: {series.SeriesInstanceUID}") - - # Combine Study and current Series properties for matching - series_attr = self._get_instance_properties(series) - series_attr.update(study_attr) - - matched = True - # Simple matching on attribute value - for key, value_to_match in attributes.items(): - logging.info(f" On attribute: {key!r} to match value: {value_to_match!r}") - # Ignore None - if not value_to_match: - continue - # Try getting the attribute value from Study and current Series prop dict - attr_value = series_attr.get(key, None) - logging.info(f" Series attribute {key} value: {attr_value}") - - # If not found, try the best at the native instance level for string VR - # This is mainly for attributes like ImageType - if not attr_value: - try: - # Can use some enhancements, especially multi-value where VM > 1 - elem = series.get_sop_instances()[0].get_native_sop_instance()[key] - if elem.VM > 1: - attr_value = [elem.repval] # repval: str representation of the element’s value - else: - attr_value = elem.value # element's value - - logging.info(f" Instance level attribute {key} value: {attr_value}") - series_attr.update({key: attr_value}) - except Exception: - logging.info(f" Attribute {key} not at instance level either") - - if not attr_value: - logging.info(f" Missing attribute: {key!r}") - matched = False - # Image orientation check - elif key == "ImageOrientationPatient": - patient_position = series_attr.get("PatientPosition") - if patient_position is None: - raise NotImplementedError( - "PatientPosition tag absent; value required for image orientation calculation" - ) - if patient_position not in ("HFP", "HFS", "HFDL", "HFDR", "FFP", "FFS", "FFDL", "FFDR"): - raise NotImplementedError(f"No support for PatientPosition value {patient_position}") - matched = self._match_image_orientation(value_to_match, attr_value) - elif isinstance(attr_value, (float, int)): - matched = self._match_numeric_condition(value_to_match, attr_value) - elif isinstance(attr_value, str): - matched = attr_value.casefold() == (value_to_match.casefold()) - if not matched: - # For str, also try RegEx search to check for a match anywhere in the string - # unless the user constrains it in the expression. - if re.search(value_to_match, attr_value, re.IGNORECASE): - matched = True - elif isinstance(attr_value, list): - # Assume multi value string attributes - meta_data_list = str(attr_value).lower() - if isinstance(value_to_match, list): - value_set = {str(element).lower() for element in value_to_match} - # split inclusion and exclusion matches using ! indicator - include_terms = {v for v in value_set if not v.startswith("!")} - exclude_terms = {v[1:] for v in value_set if v.startswith("!")} - matched = all(term in meta_data_list for term in include_terms) and all( - term not in meta_data_list for term in exclude_terms - ) - elif isinstance(value_to_match, (str, numbers.Number)): - v = str(value_to_match).lower() - # ! 
indicates exclusion match - if v.startswith("!"): - matched = v[1:] not in meta_data_list - else: - matched = v in meta_data_list - else: - raise NotImplementedError( - f"No support for matching condition {value_to_match} (type: {type(value_to_match)})" - ) - - if not matched: - logging.info("This series does not match the selection conditions") - break - - if matched: - logging.info(f"Selected Series, UID: {series.SeriesInstanceUID}") - found_series.append(series) - - if not all_matched: - return found_series - - # If sorting indicated and multiple series found, sort series in descending SOP instance count - if sort_by_sop_instance_count and len(found_series) > 1: - logging.info( - "Multiple series matched the selection criteria; choosing series with the highest number of DICOM images." - ) - found_series.sort(key=lambda x: len(x.get_sop_instances()), reverse=True) - - return found_series - - def _match_numeric_condition(self, value_to_match, attr_value): - """ - Helper method to match numeric conditions, supporting relational, inclusive range, regex, and exact match checks. - - Supported formats: - - [val, ">"]: match if attr_value > val - - [val, ">="]: match if attr_value >= val - - [val, "<"]: match if attr_value < val - - [val, "<="]: match if attr_value <= val - - [val, "!="]: match if attr_value != val - - [min_val, max_val]: inclusive range check - - "regex": regular expression match - - number: exact match - - Args: - value_to_match (Union[list, str, int, float]): The condition to match against. - attr_value (Union[int, float]): The attribute value from the series. - - Returns: - bool: True if the attribute value matches the condition, else False. - - Raises: - NotImplementedError: If the value_to_match condition is not supported for numeric matching. - """ - - if isinstance(value_to_match, list): - # Relational operator check: >, >=, <, <=, != - if len(value_to_match) == 2 and isinstance(value_to_match[1], str): - val = float(value_to_match[0]) - op = value_to_match[1] - if op == ">": - return attr_value > val - elif op == ">=": - return attr_value >= val - elif op == "<": - return attr_value < val - elif op == "<=": - return attr_value <= val - elif op == "!=": - return attr_value != val - else: - raise NotImplementedError( - f"Unsupported relational operator {op!r} in numeric condition. Must be one of: '>', '>=', '<', '<=', '!='" - ) - - # Inclusive range check - elif len(value_to_match) == 2 and all(isinstance(v, (int, float)) for v in value_to_match): - return value_to_match[0] <= attr_value <= value_to_match[1] - - else: - raise NotImplementedError(f"No support for numeric matching condition {value_to_match}") - - # Regular expression match - elif isinstance(value_to_match, str): - return bool(re.fullmatch(value_to_match, str(attr_value))) - - # Exact numeric match - elif isinstance(value_to_match, (int, float)): - return value_to_match == attr_value - - else: - raise NotImplementedError(f"No support for numeric matching on this type: {type(value_to_match)}") - - def _match_image_orientation(self, value_to_match, attr_value): - """ - Helper method to calculate and match the image orientation using the ImageOrientationPatient tag. 
- The following PatientPosition values are supported and have been tested: - - "HFP" - - "HFS" - - "HFDL" - - "HFDR" - - "FFP" - - "FFS" - - "FFDL" - - "FFDR" - - Supported image orientation inputs for matching (case-insensitive): - - "Axial" - - "Coronal" - - "Sagittal" - - Args: - value_to_match (str): The image orientation condition to match against. - attr_value (List[str]): Raw ImageOrientationPatient tag value from the series. - - Returns: - bool: True if the computed orientation matches the expected orientation, else False. - - Raises: - ValueError: If the expected orientation is invalid or the normal vector cannot be computed. - """ - - # Validate image orientation to match input - value_to_match = value_to_match.strip().lower().capitalize() - allowed_orientations = {"Axial", "Coronal", "Sagittal"} - if value_to_match not in allowed_orientations: - raise ValueError(f"Invalid orientation string {value_to_match!r}. Must be one of: {allowed_orientations}") - - # Format ImageOrientationPatient tag value as an array and grab row and column cosines - iop_str = attr_value[0].strip("[]") - iop = [float(x.strip()) for x in iop_str.split(",")] - row_cosines = np.array(iop[:3], dtype=np.float64) - col_cosines = np.array(iop[3:], dtype=np.float64) - - # Validate DICOM constraints (normal row and column cosines + should be orthogonal) - # Throw warnings if tolerance exceeded - tolerance = 1e-4 - row_norm = np.linalg.norm(row_cosines) - col_norm = np.linalg.norm(col_cosines) - dot_product = np.dot(row_cosines, col_cosines) - - if abs(row_norm - 1.0) > tolerance: - logging.warn(f"Row direction cosine normal is {row_norm}, deviates from 1 by more than {tolerance}") - if abs(col_norm - 1.0) > tolerance: - logging.warn(f"Column direction cosine normal is {col_norm}, deviates from 1 by more than {tolerance}") - if abs(dot_product) > tolerance: - logging.warn(f"Row and Column cosines are not orthogonal: dot product = {dot_product}") - - # Normalize row and column vectors - row_cosines /= np.linalg.norm(row_cosines) - col_cosines /= np.linalg.norm(col_cosines) - - # Compute and validate slice normal - normal = np.cross(row_cosines, col_cosines) - if np.linalg.norm(normal) == 0: - raise ValueError("Invalid normal vector computed from IOP") - - # Normalize the slice normal - normal /= np.linalg.norm(normal) - - # Identify the dominant image orientation - axis_labels = ["Sagittal", "Coronal", "Axial"] - major_axis = np.argmax(np.abs(normal)) - computed_orientation = axis_labels[major_axis] - - logging.info(f" Computed orientation from ImageOrientationPatient value: {computed_orientation}") - - return bool(computed_orientation == value_to_match) - - @staticmethod - def _get_instance_properties(obj: object): - if not obj: - return {} - else: - return {x: getattr(obj, x, None) for x in type(obj).__dict__ if isinstance(type(obj).__dict__[x], property)} - - -# Module functions -# Helper function to get console output of the selection content when testing the script -def _print_instance_properties(obj: object, pre_fix: str = "", print_val=True): - print(f"{pre_fix}Instance of {type(obj)}") - for attribute in [x for x in type(obj).__dict__ if isinstance(type(obj).__dict__[x], property)]: - attr_val = getattr(obj, attribute, None) - print(f"{pre_fix} {attribute}: {type(attr_val)} {attr_val if print_val else ''}") - - -def test(): - from pathlib import Path - - from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator - - current_file_dir = Path(__file__).parent.resolve() - data_path = 
current_file_dir.joinpath("../../../inputs/spleen_ct/dcm").absolute() - - fragment = Fragment() - loader = DICOMDataLoaderOperator(fragment, name="loader_op") - selector = DICOMSeriesSelectorOperator(fragment, name="selector_op") - study_list = loader.load_data_to_studies(data_path) - sample_selection_rule = json_loads(Sample_Rules_Text) - print(f"Selection rules in JSON:\n{sample_selection_rule}") - study_selected_series_list = selector.filter(sample_selection_rule, study_list) - - for sss_obj in study_selected_series_list: - _print_instance_properties(sss_obj, pre_fix="", print_val=False) - study = sss_obj.study - pre_fix = " " - print(f"{pre_fix}==== Details of the study ====") - _print_instance_properties(study, pre_fix, print_val=False) - print(f"{pre_fix}==============================") - - # The following commented code block accesses and prints the flat list of all selected series. - # for ss_obj in sss_obj.selected_series: - # pre_fix = " " - # _print_instance_properties(ss_obj, pre_fix, print_val=False) - # pre_fix = " " - # print(f"{pre_fix}==== Details of the series ====") - # _print_instance_properties(ss_obj, pre_fix) - # print(f"{pre_fix}===============================") - - # The following block uses hierarchical grouping by selection name, and prints the list of series for each. - for selection_name, ss_list in sss_obj.series_by_selection_name.items(): - pre_fix = " " - print(f"{pre_fix}Selection name: {selection_name}") - for ss_obj in ss_list: - pre_fix = " " - _print_instance_properties(ss_obj, pre_fix, print_val=False) - print(f"{pre_fix}==== Details of the series ====") - _print_instance_properties(ss_obj, pre_fix) - print(f"{pre_fix}===============================") - - print(f" A total of {len(sss_obj.selected_series)} series selected for study {study.StudyInstanceUID}") - - -# Sample rule used for testing -Sample_Rules_Text = """ -{ - "selections": [ - { - "name": "CT Series 1", - "conditions": { - "StudyDescription": "(?i)^Spleen", - "Modality": "(?i)CT", - "SeriesDescription": "(?i)^No series description|(.*?)" - } - }, - { - "name": "CT Series 2", - "conditions": { - "Modality": "CT", - "BodyPartExamined": "Abdomen", - "SeriesDescription" : "Not to be matched" - } - }, - { - "name": "CT Series 3", - "conditions": { - "StudyDescription": "(.*?)", - "Modality": "(?i)CT", - "ImageType": ["PRIMARY", "ORIGINAL", "AXIAL"], - "SliceThickness": [3, 5] - } - }, - { - "name": "CT Series 4", - "conditions": { - "StudyDescription": "(.*?)", - "Modality": "(?i)MR", - "ImageOrientationPatient": "Axial", - "SliceThickness": [2, ">"] - } - }, - { - "name": "CT Series 5", - "conditions": { - "StudyDescription": "(.*?)", - "Modality": "(?i)CT", - "ImageType": ["PRIMARY", "!SECONDARY"] - } - } - ] -} -""" - -if __name__ == "__main__": - test() +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
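+
+# Usage sketch (hypothetical wiring, for illustration only): construct the operator
+# with a JSON rules string and place it downstream of a DICOM data loader, e.g.
+#   selector = DICOMSeriesSelectorOperator(fragment, rules=rules_json_str, all_matched=True, name="series_selector")
+# A complete rules example is provided in Sample_Rules_Text at the bottom of this module.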
+ +import logging +import numbers +import re +from json import loads as json_loads +from typing import List + +import numpy as np + +from monai.deploy.core import ConditionType, Fragment, Operator, OperatorSpec +from monai.deploy.core.domain.dicom_series import DICOMSeries +from monai.deploy.core.domain.dicom_series_selection import SelectedSeries, StudySelectedSeries +from monai.deploy.core.domain.dicom_study import DICOMStudy + + +class DICOMSeriesSelectorOperator(Operator): + """This operator selects a list of DICOM Series in a DICOM Study for a given set of selection rules. + + Named input: + dicom_study_list: A list of DICOMStudy objects. + + Named output: + study_selected_series_list: A list of StudySelectedSeries objects. Downstream receiver optional. + + This class can be considered a base class, and a derived class can override the 'filter' function with + custom logic. + + In its default implementation, this class + 1. selects a series or all matched series within the scope of a study in a list of studies + 2. uses rules defined in JSON string, see below for details + 3. supports DICOM Study and Series module attribute matching + 4. supports multiple named selections, in the scope of each DICOM study + 5. outputs a list of StudySelectedSeries objects, as well as a flat list of SelectedSeries (to be deprecated) + + The selection rules are defined in JSON, + 1. attribute "selections" value is a list of selections + 2. each selection has a "name", and its "conditions" value is a list of matching criteria + 3. each condition uses the implicit equal operator; in addition, the following are supported: + - regex, relational, and range matching for float and int types + - regex matching for str type + - inclusion and exclusion matching for set type + - image orientation check for the ImageOrientationPatient tag + 4. DICOM attribute keywords are used, and only for those defined as DICOMStudy and DICOMSeries properties + + An example selection rules: + { + "selections": [ + { + "name": "CT Series 1", + "conditions": { + "StudyDescription": "(?i)^Spleen", + "Modality": "(?i)CT", + "SeriesDescription": "(?i)^No series description|(.*?)" + } + }, + { + "name": "CT Series 2", + "conditions": { + "Modality": "CT", + "BodyPartExamined": "Abdomen", + "SeriesDescription" : "Not to be matched. For illustration only." + } + }, + { + "name": "CT Series 3", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageType": ["PRIMARY", "ORIGINAL", "AXIAL"], + "SliceThickness": [3, 5] + } + }, + { + "name": "CT Series 4", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageOrientationPatient": "Axial", + "SliceThickness": [2, ">"] + } + }, + { + "name": "CT Series 5", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageType": ["PRIMARY", "!SECONDARY"] + } + } + ] + } + """ + + def __init__( + self, + fragment: Fragment, + *args, + rules: str = "", + all_matched: bool = False, + sort_by_sop_instance_count: bool = False, + **kwargs, + ) -> None: + """Instantiate an instance. + + Args: + fragment (Fragment): An instance of the Application class which is derived from Fragment. + rules (Text): Selection rules in JSON string. + all_matched (bool): Gets all matched series in a study. Defaults to False for first match only. + sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in + descending SOP instance count (i.e. 
the first Series in the returned List[StudySelectedSeries] will have the highest # + of DICOM images); Defaults to False for no sorting. + """ + + # Delay loading the rules as JSON string till compute time. + self._rules_json_str = rules if rules and rules.strip() else None + self._all_matched = all_matched # all_matched + self._sort_by_sop_instance_count = sort_by_sop_instance_count # sort_by_sop_instance_count + self.input_name_study_list = "dicom_study_list" + self.output_name_selected_series = "study_selected_series_list" + + super().__init__(fragment, *args, **kwargs) + + def setup(self, spec: OperatorSpec): + spec.input(self.input_name_study_list) + spec.output(self.output_name_selected_series).condition(ConditionType.NONE) # Receiver optional + + # Can use the config file to alter the selection rules per app run + # spec.param("selection_rules") + + def compute(self, op_input, op_output, context): + """Performs computation for this operator.""" + + dicom_study_list = op_input.receive(self.input_name_study_list) + selection_rules = self._load_rules() if self._rules_json_str else None + study_selected_series = self.filter( + selection_rules, dicom_study_list, self._all_matched, self._sort_by_sop_instance_count + ) + + # Log Series Description and Series Instance UID of the first selected DICOM Series (i.e. the one to be used for inference) + if study_selected_series and len(study_selected_series) > 0: + inference_study = study_selected_series[0] + if inference_study.selected_series and len(inference_study.selected_series) > 0: + inference_series = inference_study.selected_series[0].series + logging.info("Series Selection finalized") + logging.info( + f"Series Description of selected DICOM Series for inference: {inference_series.SeriesDescription}" + ) + logging.info( + f"Series Instance UID of selected DICOM Series for inference: {inference_series.SeriesInstanceUID}" + ) + + op_output.emit(study_selected_series, self.output_name_selected_series) + + def filter( + self, selection_rules, dicom_study_list, all_matched: bool = False, sort_by_sop_instance_count: bool = False + ) -> List[StudySelectedSeries]: + """Selects the series with the given matching rules. + + If rules object is None, all series will be returned with series instance UID as the selection name. + + Supported matching logic: + Float + Int: exact matching, relational matching, range matching, and regex matching + String: matches case insensitive, if fails then tries RegEx search + String array (set): inclusive and exclusive (via !) matching as subsets, case insensitive + ImageOrientationPatient tag: image orientation (Axial, Coronal, Sagittal) matching + + Args: + selection_rules (object): JSON object containing the matching rules. + dicom_study_list (list): A list of DICOMStudy objects. + all_matched (bool): Gets all matched series in a study. Defaults to False for first match only. + sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in + descending SOP instance count (i.e. the first Series in the returned List[StudySelectedSeries] will have the highest # + of DICOM images); Defaults to False for no sorting. + + Returns: + list: A list of objects of type StudySelectedSeries. + + Raises: + ValueError: If the selection_rules object does not contain "selections" attribute. 
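+
+        Example:
+            A minimal sketch; ``rules_json`` and ``study_list`` are hypothetical objects
+            of the types documented above::
+
+                selected = selector.filter(rules_json, study_list, all_matched=True)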
+        """
+
+        if not dicom_study_list or len(dicom_study_list) < 1:
+            return []
+
+        if not selection_rules:
+            # Return all series if no selection rules are supplied
+            logging.warning("No selection rules given; select all series.")
+            return self._select_all_series(dicom_study_list)
+
+        selections = selection_rules.get("selections", None)  # TODO: selection_rules is a parsed dict now, not JSON text.
+        # If missing selections in the rules then it is an error.
+        if not selections:
+            raise ValueError('Expected "selections" not found in the rules.')
+
+        study_selected_series_list = []  # List of StudySelectedSeries objects
+
+        for study in dicom_study_list:
+            study_selected_series = StudySelectedSeries(study)
+            for selection in selections:
+                # Get the selection name. Blank name will be handled by the SelectedSeries
+                selection_name = selection.get("name", "").strip()
+                logging.info(f"Finding series for Selection named: {selection_name}")
+
+                # Skip if no selection conditions are provided.
+                conditions = selection.get("conditions", None)
+                if not conditions:
+                    continue
+
+                # Select the series matching the conditions; a single-element list unless all_matched is True
+                series_list = self._select_series(conditions, study, all_matched, sort_by_sop_instance_count)
+                if series_list and len(series_list) > 0:
+                    for series in series_list:
+                        selected_series = SelectedSeries(selection_name, series, None)  # No Image obj yet.
+                        study_selected_series.add_selected_series(selected_series)
+
+            if len(study_selected_series.selected_series) > 0:
+                study_selected_series_list.append(study_selected_series)
+
+        return study_selected_series_list
+
+    def _load_rules(self):
+        return json_loads(self._rules_json_str) if self._rules_json_str else None
+
+    def _select_all_series(self, dicom_study_list: List[DICOMStudy]) -> List[StudySelectedSeries]:
+        """Select all series in studies
+
+        Returns:
+            list: list of StudySelectedSeries objects
+        """
+
+        study_selected_series_list = []
+        for study in dicom_study_list:
+            logging.info(f"Working on study, instance UID: {study.StudyInstanceUID}")
+            study_selected_series = StudySelectedSeries(study)
+            for series in study.get_all_series():
+                logging.info(f"Working on series, instance UID: {str(series.SeriesInstanceUID)}")
+                selected_series = SelectedSeries("", series, None)  # No selection name or Image obj.
+                study_selected_series.add_selected_series(selected_series)
+            study_selected_series_list.append(study_selected_series)
+        return study_selected_series_list
+
+    def _select_series(
+        self, attributes: dict, study: DICOMStudy, all_matched=False, sort_by_sop_instance_count=False
+    ) -> List[DICOMSeries]:
+        """Finds series whose attributes match the given attributes.
+
+        Args:
+            attributes (dict): Dictionary of attributes for matching
+            all_matched (bool): Gets all matched series in a study. Defaults to False for first match only.
+            sort_by_sop_instance_count (bool): If all_matched = True and multiple series are matched, sorts the matched series in
+            descending SOP instance count (i.e. the first Series in the returned List[StudySelectedSeries] will have the highest #
+            of DICOM images); Defaults to False for no sorting.
+
+        Returns:
+            List of DICOMSeries. At most one element if all_matched is False.
+
+        Raises:
+            NotImplementedError: If the value_to_match type is not supported for matching or unsupported PatientPosition value.
+        """
+        assert isinstance(attributes, dict), '"attributes" must be a dict.'
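+        # Illustrative sketch of the matching semantics implemented below, using
+        # hypothetical condition values: a conditions dict such as
+        #   {"Modality": "(?i)CT", "SliceThickness": [2.5, 5.0], "ImageType": ["PRIMARY", "!SECONDARY"]}
+        # would match a series whose Modality is CT (case-insensitive regex), whose
+        # SliceThickness falls within the inclusive range [2.5, 5.0], and whose
+        # ImageType multi-value contains PRIMARY but not SECONDARY.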
+ + logging.info(f"Searching study, : {study.StudyInstanceUID}\n # of series: {len(study.get_all_series())}") + study_attr = self._get_instance_properties(study) + + found_series = [] + for series in study.get_all_series(): + logging.info(f"Working on series, instance UID: {series.SeriesInstanceUID}") + + # Combine Study and current Series properties for matching + series_attr = self._get_instance_properties(series) + series_attr.update(study_attr) + + matched = True + # Simple matching on attribute value + for key, value_to_match in attributes.items(): + logging.info(f" On attribute: {key!r} to match value: {value_to_match!r}") + # Ignore None + if not value_to_match: + continue + # Try getting the attribute value from Study and current Series prop dict + attr_value = series_attr.get(key, None) + logging.info(f" Series attribute {key} value: {attr_value}") + + # If not found, try the best at the native instance level for string VR + # This is mainly for attributes like ImageType + if not attr_value: + try: + # Can use some enhancements, especially multi-value where VM > 1 + elem = series.get_sop_instances()[0].get_native_sop_instance()[key] + if elem.VM > 1: + attr_value = [elem.repval] # repval: str representation of the element’s value + else: + attr_value = elem.value # element's value + + logging.info(f" Instance level attribute {key} value: {attr_value}") + series_attr.update({key: attr_value}) + except Exception: + logging.info(f" Attribute {key} not at instance level either") + + if not attr_value: + logging.info(f" Missing attribute: {key!r}") + matched = False + # Image orientation check + elif key == "ImageOrientationPatient": + patient_position = series_attr.get("PatientPosition") + if patient_position is None: + raise NotImplementedError( + "PatientPosition tag absent; value required for image orientation calculation" + ) + if patient_position not in ("HFP", "HFS", "HFDL", "HFDR", "FFP", "FFS", "FFDL", "FFDR"): + raise NotImplementedError(f"No support for PatientPosition value {patient_position}") + matched = self._match_image_orientation(value_to_match, attr_value) + elif isinstance(attr_value, (float, int)): + matched = self._match_numeric_condition(value_to_match, attr_value) + elif isinstance(attr_value, str): + matched = attr_value.casefold() == (value_to_match.casefold()) + if not matched: + # For str, also try RegEx search to check for a match anywhere in the string + # unless the user constrains it in the expression. + if re.search(value_to_match, attr_value, re.IGNORECASE): + matched = True + elif isinstance(attr_value, list): + # Assume multi value string attributes + meta_data_list = str(attr_value).lower() + if isinstance(value_to_match, list): + value_set = {str(element).lower() for element in value_to_match} + # split inclusion and exclusion matches using ! indicator + include_terms = {v for v in value_set if not v.startswith("!")} + exclude_terms = {v[1:] for v in value_set if v.startswith("!")} + matched = all(term in meta_data_list for term in include_terms) and all( + term not in meta_data_list for term in exclude_terms + ) + elif isinstance(value_to_match, (str, numbers.Number)): + v = str(value_to_match).lower() + # ! 
indicates exclusion match + if v.startswith("!"): + matched = v[1:] not in meta_data_list + else: + matched = v in meta_data_list + else: + raise NotImplementedError( + f"No support for matching condition {value_to_match} (type: {type(value_to_match)})" + ) + + if not matched: + logging.info("This series does not match the selection conditions") + break + + if matched: + logging.info(f"Selected Series, UID: {series.SeriesInstanceUID}") + found_series.append(series) + + if not all_matched: + return found_series + + # If sorting indicated and multiple series found, sort series in descending SOP instance count + if sort_by_sop_instance_count and len(found_series) > 1: + logging.info( + "Multiple series matched the selection criteria; choosing series with the highest number of DICOM images." + ) + found_series.sort(key=lambda x: len(x.get_sop_instances()), reverse=True) + + return found_series + + def _match_numeric_condition(self, value_to_match, attr_value): + """ + Helper method to match numeric conditions, supporting relational, inclusive range, regex, and exact match checks. + + Supported formats: + - [val, ">"]: match if attr_value > val + - [val, ">="]: match if attr_value >= val + - [val, "<"]: match if attr_value < val + - [val, "<="]: match if attr_value <= val + - [val, "!="]: match if attr_value != val + - [min_val, max_val]: inclusive range check + - "regex": regular expression match + - number: exact match + + Args: + value_to_match (Union[list, str, int, float]): The condition to match against. + attr_value (Union[int, float]): The attribute value from the series. + + Returns: + bool: True if the attribute value matches the condition, else False. + + Raises: + NotImplementedError: If the value_to_match condition is not supported for numeric matching. + """ + + if isinstance(value_to_match, list): + # Relational operator check: >, >=, <, <=, != + if len(value_to_match) == 2 and isinstance(value_to_match[1], str): + val = float(value_to_match[0]) + op = value_to_match[1] + if op == ">": + return attr_value > val + elif op == ">=": + return attr_value >= val + elif op == "<": + return attr_value < val + elif op == "<=": + return attr_value <= val + elif op == "!=": + return attr_value != val + else: + raise NotImplementedError( + f"Unsupported relational operator {op!r} in numeric condition. Must be one of: '>', '>=', '<', '<=', '!='" + ) + + # Inclusive range check + elif len(value_to_match) == 2 and all(isinstance(v, (int, float)) for v in value_to_match): + return value_to_match[0] <= attr_value <= value_to_match[1] + + else: + raise NotImplementedError(f"No support for numeric matching condition {value_to_match}") + + # Regular expression match + elif isinstance(value_to_match, str): + return bool(re.fullmatch(value_to_match, str(attr_value))) + + # Exact numeric match + elif isinstance(value_to_match, (int, float)): + return value_to_match == attr_value + + else: + raise NotImplementedError(f"No support for numeric matching on this type: {type(value_to_match)}") + + def _match_image_orientation(self, value_to_match, attr_value): + """ + Helper method to calculate and match the image orientation using the ImageOrientationPatient tag. 
+        The following PatientPosition values are supported and have been tested:
+        - "HFP"
+        - "HFS"
+        - "HFDL"
+        - "HFDR"
+        - "FFP"
+        - "FFS"
+        - "FFDL"
+        - "FFDR"
+
+        Supported image orientation inputs for matching (case-insensitive):
+        - "Axial"
+        - "Coronal"
+        - "Sagittal"
+
+        Args:
+            value_to_match (str): The image orientation condition to match against.
+            attr_value (List[str]): Raw ImageOrientationPatient tag value from the series.
+
+        Returns:
+            bool: True if the computed orientation matches the expected orientation, else False.
+
+        Raises:
+            ValueError: If the expected orientation is invalid or the normal vector cannot be computed.
+        """
+
+        # Validate image orientation to match input
+        value_to_match = value_to_match.strip().capitalize()  # capitalize() also lowercases the remaining characters
+        allowed_orientations = {"Axial", "Coronal", "Sagittal"}
+        if value_to_match not in allowed_orientations:
+            raise ValueError(f"Invalid orientation string {value_to_match!r}. Must be one of: {allowed_orientations}")
+
+        # Format ImageOrientationPatient tag value as an array and grab row and column cosines
+        iop_str = attr_value[0].strip("[]")
+        iop = [float(x.strip()) for x in iop_str.split(",")]
+        row_cosines = np.array(iop[:3], dtype=np.float64)
+        col_cosines = np.array(iop[3:], dtype=np.float64)
+
+        # Validate DICOM constraints (unit-norm row and column cosines + should be orthogonal)
+        # Log warnings if tolerance exceeded
+        tolerance = 1e-4
+        row_norm = np.linalg.norm(row_cosines)
+        col_norm = np.linalg.norm(col_cosines)
+        dot_product = np.dot(row_cosines, col_cosines)
+
+        if abs(row_norm - 1.0) > tolerance:
+            logging.warning(f"Row direction cosine norm is {row_norm}, deviates from 1 by more than {tolerance}")
+        if abs(col_norm - 1.0) > tolerance:
+            logging.warning(f"Column direction cosine norm is {col_norm}, deviates from 1 by more than {tolerance}")
+        if abs(dot_product) > tolerance:
+            logging.warning(f"Row and Column cosines are not orthogonal: dot product = {dot_product}")
+
+        # Normalize row and column vectors
+        row_cosines /= np.linalg.norm(row_cosines)
+        col_cosines /= np.linalg.norm(col_cosines)
+
+        # Compute and validate slice normal
+        normal = np.cross(row_cosines, col_cosines)
+        if np.linalg.norm(normal) == 0:
+            raise ValueError("Invalid normal vector computed from IOP")
+
+        # Normalize the slice normal
+        normal /= np.linalg.norm(normal)
+
+        # Identify the dominant image orientation
+        axis_labels = ["Sagittal", "Coronal", "Axial"]
+        major_axis = np.argmax(np.abs(normal))
+        computed_orientation = axis_labels[major_axis]
+
+        logging.info(f"  Computed orientation from ImageOrientationPatient value: {computed_orientation}")
+
+        return bool(computed_orientation == value_to_match)
+
+    @staticmethod
+    def _get_instance_properties(obj: object):
+        if not obj:
+            return {}
+        else:
+            return {x: getattr(obj, x, None) for x in type(obj).__dict__ if isinstance(type(obj).__dict__[x], property)}
+
+
+# Module functions
+# Helper function to get console output of the selection content when testing the script
+def _print_instance_properties(obj: object, pre_fix: str = "", print_val=True):
+    print(f"{pre_fix}Instance of {type(obj)}")
+    for attribute in [x for x in type(obj).__dict__ if isinstance(type(obj).__dict__[x], property)]:
+        attr_val = getattr(obj, attribute, None)
+        print(f"{pre_fix}  {attribute}: {type(attr_val)} {attr_val if print_val else ''}")
+
+
+def test():
+    from pathlib import Path
+
+    from monai.deploy.operators.dicom_data_loader_operator import DICOMDataLoaderOperator
+
+    current_file_dir = Path(__file__).parent.resolve()
+    data_path = 
current_file_dir.joinpath("../../../inputs/spleen_ct/dcm").absolute() + + fragment = Fragment() + loader = DICOMDataLoaderOperator(fragment, name="loader_op") + selector = DICOMSeriesSelectorOperator(fragment, name="selector_op") + study_list = loader.load_data_to_studies(data_path) + sample_selection_rule = json_loads(Sample_Rules_Text) + print(f"Selection rules in JSON:\n{sample_selection_rule}") + study_selected_series_list = selector.filter(sample_selection_rule, study_list) + + for sss_obj in study_selected_series_list: + _print_instance_properties(sss_obj, pre_fix="", print_val=False) + study = sss_obj.study + pre_fix = " " + print(f"{pre_fix}==== Details of the study ====") + _print_instance_properties(study, pre_fix, print_val=False) + print(f"{pre_fix}==============================") + + # The following commented code block accesses and prints the flat list of all selected series. + # for ss_obj in sss_obj.selected_series: + # pre_fix = " " + # _print_instance_properties(ss_obj, pre_fix, print_val=False) + # pre_fix = " " + # print(f"{pre_fix}==== Details of the series ====") + # _print_instance_properties(ss_obj, pre_fix) + # print(f"{pre_fix}===============================") + + # The following block uses hierarchical grouping by selection name, and prints the list of series for each. + for selection_name, ss_list in sss_obj.series_by_selection_name.items(): + pre_fix = " " + print(f"{pre_fix}Selection name: {selection_name}") + for ss_obj in ss_list: + pre_fix = " " + _print_instance_properties(ss_obj, pre_fix, print_val=False) + print(f"{pre_fix}==== Details of the series ====") + _print_instance_properties(ss_obj, pre_fix) + print(f"{pre_fix}===============================") + + print(f" A total of {len(sss_obj.selected_series)} series selected for study {study.StudyInstanceUID}") + + +# Sample rule used for testing +Sample_Rules_Text = """ +{ + "selections": [ + { + "name": "CT Series 1", + "conditions": { + "StudyDescription": "(?i)^Spleen", + "Modality": "(?i)CT", + "SeriesDescription": "(?i)^No series description|(.*?)" + } + }, + { + "name": "CT Series 2", + "conditions": { + "Modality": "CT", + "BodyPartExamined": "Abdomen", + "SeriesDescription" : "Not to be matched" + } + }, + { + "name": "CT Series 3", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageType": ["PRIMARY", "ORIGINAL", "AXIAL"], + "SliceThickness": [3, 5] + } + }, + { + "name": "CT Series 4", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)MR", + "ImageOrientationPatient": "Axial", + "SliceThickness": [2, ">"] + } + }, + { + "name": "CT Series 5", + "conditions": { + "StudyDescription": "(.*?)", + "Modality": "(?i)CT", + "ImageType": ["PRIMARY", "!SECONDARY"] + } + } + ] +} +""" + +if __name__ == "__main__": + test() diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py index 712be41a..50c21001 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py @@ -1,995 +1,995 @@ -# Copyright (c) MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import annotations - -import os -import shutil -from pathlib import Path -from typing import Any, Optional, Tuple, Union - -import numpy as np -import torch -from torch.backends import cudnn - -from monai.data.meta_tensor import MetaTensor -from monai.utils import optional_import - -join, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="join") -load_json, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="load_json") - -__all__ = [ - "get_nnunet_trainer", - "get_nnunet_monai_predictor", - "get_network_from_nnunet_plans", - "convert_nnunet_to_monai_bundle", - "convert_monai_bundle_to_nnunet", - "ModelnnUNetWrapper", - "EnsembleProbabilitiesToSegmentation", -] - -# Constants -NNUNET_CHECKPOINT_FILENAME = "nnunet_checkpoint.pth" -PLANS_JSON_FILENAME = "plans.json" -DATASET_JSON_FILENAME = "dataset.json" - - -# Convert a single nnUNet model checkpoint to MONAI bundle format -# The function saves the converted model checkpoint and configuration files in the specified bundle root folder. -def convert_nnunet_to_monai_bundle(nnunet_config: dict, bundle_root_folder: str, fold: int = 0) -> None: - """ - Convert nnUNet model checkpoints and configuration to MONAI bundle format. - - Parameters - ---------- - nnunet_config : dict - Configuration dictionary for nnUNet, containing keys such as 'dataset_name_or_id', 'nnunet_configuration', - 'nnunet_trainer', and 'nnunet_plans'. - bundle_root_folder : str - Root folder where the MONAI bundle will be saved. - fold : int, optional - Fold number of the nnUNet model to be converted, by default 0. 
-
-    Returns
-    -------
-    None
-    """
-
-    nnunet_trainer = "nnUNetTrainer"
-    nnunet_plans = "nnUNetPlans"
-    nnunet_configuration = "3d_fullres"
-
-    if "nnunet_trainer" in nnunet_config:
-        nnunet_trainer = nnunet_config["nnunet_trainer"]
-
-    if "nnunet_plans" in nnunet_config:
-        nnunet_plans = nnunet_config["nnunet_plans"]
-
-    if "nnunet_configuration" in nnunet_config:
-        nnunet_configuration = nnunet_config["nnunet_configuration"]
-
-    from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
-
-    dataset_name = maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"])
-    nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath(
-        dataset_name, f"{nnunet_trainer}__{nnunet_plans}__{nnunet_configuration}"
-    )
-
-    nnunet_checkpoint_final = torch.load(
-        Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_final.pth"), weights_only=False
-    )
-    nnunet_checkpoint_best = torch.load(
-        Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_best.pth"), weights_only=False
-    )
-
-    nnunet_checkpoint = {}
-    nnunet_checkpoint["inference_allowed_mirroring_axes"] = nnunet_checkpoint_final["inference_allowed_mirroring_axes"]
-    nnunet_checkpoint["init_args"] = nnunet_checkpoint_final["init_args"]
-    nnunet_checkpoint["trainer_name"] = nnunet_checkpoint_final["trainer_name"]
-
-    Path(bundle_root_folder).joinpath("models", nnunet_configuration).mkdir(parents=True, exist_ok=True)
-
-    torch.save(
-        nnunet_checkpoint, Path(bundle_root_folder).joinpath("models", nnunet_configuration, NNUNET_CHECKPOINT_FILENAME)
-    )
-
-    Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}").mkdir(parents=True, exist_ok=True)
-    # This might not be needed, comment it out for now
-    # monai_last_checkpoint = {}
-    # monai_last_checkpoint["network_weights"] = nnunet_checkpoint_final["network_weights"]
-    # torch.save(monai_last_checkpoint, Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}", "model.pt"))
-
-    monai_best_checkpoint = {}
-    monai_best_checkpoint["network_weights"] = nnunet_checkpoint_best["network_weights"]
-    torch.save(
-        monai_best_checkpoint,
-        Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}", "best_model.pt"),
-    )
-
-    # ensure the destination folder for the shared JSON files exists before copying into it
-    Path(bundle_root_folder).joinpath("models", "jsonpkls").mkdir(parents=True, exist_ok=True)
-
-    if not os.path.exists(os.path.join(bundle_root_folder, "models", "jsonpkls", PLANS_JSON_FILENAME)):
-        shutil.copy(
-            Path(nnunet_model_folder).joinpath(PLANS_JSON_FILENAME),
-            Path(bundle_root_folder).joinpath("models", "jsonpkls", PLANS_JSON_FILENAME),
-        )
-
-    if not os.path.exists(os.path.join(bundle_root_folder, "models", "jsonpkls", DATASET_JSON_FILENAME)):
-        shutil.copy(
-            Path(nnunet_model_folder).joinpath(DATASET_JSON_FILENAME),
-            Path(bundle_root_folder).joinpath("models", "jsonpkls", DATASET_JSON_FILENAME),
-        )
-
-
-# A function to convert all nnunet models (configs and folds) to MONAI bundle format.
-# The function iterates through all folds and configurations, converting each model to the specified bundle format.
-# The number of folds, configurations, plans and dataset.json will be parsed from the nnunet folder
-def convert_best_nnunet_to_monai_bundle(
-    nnunet_config: dict, bundle_root_folder: str, inference_info_file: str = "inference_information.json"
-) -> None:
-    """
-    Convert all nnUNet models (configs and folds) to MONAI bundle format.
-
-    Parameters
-    ----------
-    nnunet_config : dict
-        Configuration dictionary for nnUNet. Expected keys are:
-        - "dataset_name_or_id": str, name or ID of the dataset.
- - "nnunet_configuration": str, configuration name. - - "nnunet_trainer": str, optional, name of the nnU-Net trainer (default is "nnUNetTrainer"). - - "nnunet_plans": str, optional, name of the nnU-Net plans (default is "nnUNetPlans"). - bundle_root_folder : str - Path to the root folder of the MONAI bundle. - inference_info : str, optional - Path to the inference information file (default is "inference_information.json"). - - Returns - ------- - None - """ - from batchgenerators.utilities.file_and_folder_operations import subfiles - from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name - - dataset_name = nnunet_config["dataset_name_or_id"] - - inference_info_path = Path(os.environ["nnUNet_results"]).joinpath( - maybe_convert_to_dataset_name(dataset_name), inference_info_file - ) - - if not os.path.exists(inference_info_path): - raise FileNotFoundError(f"Inference information file not found: {inference_info_path}") - inference_info = load_json(inference_info_path) - - # Get the best model or ensemble from the inference information - if "best_model_or_ensemble" not in inference_info: - raise KeyError(f"Key 'best_model_or_ensemble' not found in inference information file: {inference_info_path}") - best_model_dict = inference_info["best_model_or_ensemble"] - - # Get the folds information - if "folds" not in inference_info: - raise KeyError(f"Key 'folds' not found in inference information file: {inference_info_path}") - folds = inference_info["folds"] # list of folds - - cascade_3d_fullres = False - for model_dict in best_model_dict["selected_model_or_models"]: - if model_dict["configuration"] == "3d_cascade_fullres": - cascade_3d_fullres = True - - print("Converting model: ", model_dict["configuration"]) - nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath( - maybe_convert_to_dataset_name(dataset_name), - f"{model_dict['trainer']}__{model_dict['plans_identifier']}__{model_dict['configuration']}", - ) - nnunet_config["nnunet_configuration"] = model_dict["configuration"] - nnunet_config["nnunet_trainer"] = model_dict["trainer"] - nnunet_config["nnunet_plans"] = model_dict["plans_identifier"] - - if not os.path.exists(nnunet_model_folder): - raise FileNotFoundError(f"Model folder not found: {nnunet_model_folder}") - - for fold in folds: - print("Converting fold: ", fold, " of model: ", model_dict["configuration"]) - convert_nnunet_to_monai_bundle(nnunet_config, bundle_root_folder, fold) - - # IF model is a cascade model, 3d_lowres is also needed - if cascade_3d_fullres: - # check if 3d_lowres is already in the bundle - if not os.path.exists(os.path.join(bundle_root_folder, "models", "3d_lowres")): - # copy the 3d_lowres model folder from nnunet results - nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath( - maybe_convert_to_dataset_name(dataset_name), - f"{model_dict['trainer']}__{model_dict['plans_identifier']}__3d_lowres", - ) - if not os.path.exists(nnunet_model_folder): - raise FileNotFoundError(f"Model folder not found: {nnunet_model_folder}") - # copy the 3d_lowres model folder to the bundle root folder - nnunet_config["nnunet_configuration"] = "3d_lowres" - nnunet_config["nnunet_trainer"] = best_model_dict["selected_model_or_models"][-1][ - "trainer" - ] # Using the same trainer as the cascade model - nnunet_config["nnunet_plans"] = best_model_dict["selected_model_or_models"][-1][ - "plans_identifier" - ] # Using the same plans id as the cascade model - for fold in folds: - print("Converting fold: ", fold, " of model: ", 
"3d_lowres") - convert_nnunet_to_monai_bundle(nnunet_config, bundle_root_folder, fold) - - # Finally if postprocessing is needed (for ensemble models) - if "postprocessing_file" in best_model_dict: - postprocessing_file_path = best_model_dict["postprocessing_file"] - if not os.path.exists(postprocessing_file_path): - raise FileNotFoundError(f"Postprocessing file not found: {postprocessing_file_path}") - shutil.copy(postprocessing_file_path, Path(bundle_root_folder).joinpath("models", "postprocessing.pkl")) - - -def convert_monai_bundle_to_nnunet(nnunet_config: dict, bundle_root_folder: str, fold: int = 0) -> None: - """ - Convert a MONAI bundle to nnU-Net format. - - Parameters - ---------- - nnunet_config : dict - Configuration dictionary for nnU-Net. Expected keys are: - - "dataset_name_or_id": str, name or ID of the dataset. - - "nnunet_trainer": str, optional, name of the nnU-Net trainer (default is "nnUNetTrainer"). - - "nnunet_plans": str, optional, name of the nnU-Net plans (default is "nnUNetPlans"). - bundle_root_folder : str - Path to the root folder of the MONAI bundle. - fold : int, optional - Fold number for cross-validation (default is 0). - - Returns - ------- - None - """ - from odict import odict - - nnunet_trainer: str = "nnUNetTrainer" - nnunet_plans: str = "nnUNetPlans" - - if "nnunet_trainer" in nnunet_config: - nnunet_trainer = nnunet_config["nnunet_trainer"] - - if "nnunet_plans" in nnunet_config: - nnunet_plans = nnunet_config["nnunet_plans"] - - from nnunetv2.training.logging.nnunet_logger import nnUNetLogger - from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name - - def subfiles( - folder: Union[str, Path], prefix: Optional[str] = None, suffix: Optional[str] = None, sort: bool = True - ) -> list[str]: - res = [ - i.name - for i in Path(folder).iterdir() - if i.is_file() - and (prefix is None or i.name.startswith(prefix)) - and (suffix is None or i.name.endswith(suffix)) - ] - if sort: - res.sort() - return res - - nnunet_model_folder: Path = Path(os.environ["nnUNet_results"]).joinpath( - maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"]), - f"{nnunet_trainer}__{nnunet_plans}__3d_fullres", - ) - - nnunet_preprocess_model_folder: Path = Path(os.environ["nnUNet_preprocessed"]).joinpath( - maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"]) - ) - - Path(nnunet_model_folder).joinpath(f"fold_{fold}").mkdir(parents=True, exist_ok=True) - - nnunet_checkpoint: dict = torch.load(f"{bundle_root_folder}/models/{NNUNET_CHECKPOINT_FILENAME}", weights_only=False) - latest_checkpoints: list[str] = subfiles( - Path(bundle_root_folder).joinpath("models", f"fold_{fold}"), prefix="checkpoint_epoch", sort=True - ) - epochs: list[int] = [] - for latest_checkpoint in latest_checkpoints: - epochs.append(int(latest_checkpoint[len("checkpoint_epoch=") : -len(".pt")])) - - epochs.sort() - final_epoch: int = epochs[-1] - monai_last_checkpoint: dict = torch.load( - f"{bundle_root_folder}/models/fold_{fold}/checkpoint_epoch={final_epoch}.pt", weights_only=False - ) - - best_checkpoints: list[str] = subfiles( - Path(bundle_root_folder).joinpath("models", f"fold_{fold}"), prefix="checkpoint_key_metric", sort=True - ) - key_metrics: list[str] = [] - for best_checkpoint in best_checkpoints: - key_metrics.append(str(best_checkpoint[len("checkpoint_key_metric=") : -len(".pt")])) - - key_metrics.sort() - best_key_metric: str = key_metrics[-1] - monai_best_checkpoint: dict = torch.load( - 
f"{bundle_root_folder}/models/fold_{fold}/checkpoint_key_metric={best_key_metric}.pt", weights_only=False - ) - - if "optimizer_state" in monai_last_checkpoint: - nnunet_checkpoint["optimizer_state"] = monai_last_checkpoint["optimizer_state"] - - nnunet_checkpoint["network_weights"] = odict() - - for key in monai_last_checkpoint["network_weights"]: - nnunet_checkpoint["network_weights"][key] = monai_last_checkpoint["network_weights"][key] - - nnunet_checkpoint["current_epoch"] = final_epoch - nnunet_checkpoint["logging"] = nnUNetLogger().get_checkpoint() - nnunet_checkpoint["_best_ema"] = 0 - nnunet_checkpoint["grad_scaler_state"] = None - - torch.save(nnunet_checkpoint, Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_final.pth")) - - nnunet_checkpoint["network_weights"] = odict() - - if "optimizer_state" in monai_last_checkpoint: - nnunet_checkpoint["optimizer_state"] = monai_best_checkpoint["optimizer_state"] - - for key in monai_best_checkpoint["network_weights"]: - nnunet_checkpoint["network_weights"][key] = monai_best_checkpoint["network_weights"][key] - - torch.save(nnunet_checkpoint, Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_best.pth")) - - if not os.path.exists(os.path.join(nnunet_model_folder, DATASET_JSON_FILENAME)): - shutil.copy(f"{bundle_root_folder}/models/jsonpkls/{DATASET_JSON_FILENAME}", nnunet_model_folder) - if not os.path.exists(os.path.join(nnunet_model_folder, PLANS_JSON_FILENAME)): - shutil.copy(f"{bundle_root_folder}/models/jsonpkls/{PLANS_JSON_FILENAME}", nnunet_model_folder) - if not os.path.exists(os.path.join(nnunet_model_folder, "dataset_fingerprint.json")): - shutil.copy(f"{nnunet_preprocess_model_folder}/dataset_fingerprint.json", nnunet_model_folder) - if not os.path.exists(os.path.join(nnunet_model_folder, NNUNET_CHECKPOINT_FILENAME)): - shutil.copy(f"{bundle_root_folder}/models/{NNUNET_CHECKPOINT_FILENAME}", nnunet_model_folder) - - -# This function loads a nnUNet network from the provided plans and dataset files. -# It initializes the network architecture and loads the model weights if a checkpoint is provided. -def get_network_from_nnunet_plans( - plans_file: str, - dataset_file: str, - configuration: str, - model_ckpt: Optional[str] = None, - model_key_in_ckpt: str = "model", -) -> Union[torch.nn.Module, Any]: - """ - Load and initialize a nnUNet network based on nnUNet plans and configuration. - - Parameters - ---------- - plans_file : str - Path to the JSON file containing the nnUNet plans. - dataset_file : str - Path to the JSON file containing the dataset information. - configuration : str - The configuration name to be used from the plans. - model_ckpt : Optional[str], optional - Path to the model checkpoint file. If None, the network is returned without loading weights (default is None). - model_key_in_ckpt : str, optional - The key in the checkpoint file that contains the model state dictionary (default is "model"). - - Returns - ------- - network : torch.nn.Module - The initialized neural network, with weights loaded if `model_ckpt` is provided. 
- """ - from batchgenerators.utilities.file_and_folder_operations import load_json - from nnunetv2.utilities.get_network_from_plans import get_network_from_plans - from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels - from nnunetv2.utilities.plans_handling.plans_handler import PlansManager - - plans = load_json(plans_file) - dataset_json = load_json(dataset_file) - - plans_manager = PlansManager(plans) - configuration_manager = plans_manager.get_configuration(configuration) - num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) - label_manager = plans_manager.get_label_manager(dataset_json) - - enable_deep_supervision = True - - network = get_network_from_plans( - configuration_manager.network_arch_class_name, - configuration_manager.network_arch_init_kwargs, - configuration_manager.network_arch_init_kwargs_req_import, - num_input_channels, - label_manager.num_segmentation_heads, - allow_init=True, - deep_supervision=enable_deep_supervision, - ) - - if model_ckpt is None: - return network - else: - state_dict = torch.load(model_ckpt, weights_only=False) - network.load_state_dict(state_dict[model_key_in_ckpt]) - return network - - -def get_nnunet_trainer( - dataset_name_or_id: Union[str, int], - configuration: str, - fold: Union[int, str], - trainer_class_name: str = "nnUNetTrainer", - plans_identifier: str = "nnUNetPlans", - use_compressed_data: bool = False, - continue_training: bool = False, - only_run_validation: bool = False, - disable_checkpointing: bool = False, - device: str = "cuda", - pretrained_model: Optional[str] = None, -) -> Any: # type: ignore - """ - Get the nnUNet trainer instance based on the provided configuration. - The returned nnUNet trainer can be used to initialize the SupervisedTrainer for training, including the network, - optimizer, loss function, DataLoader, etc. - - Example:: - - from monai.apps import SupervisedTrainer - from monai.bundle.nnunet import get_nnunet_trainer - - dataset_name_or_id = 'Task009_Spleen' - fold = 0 - configuration = '3d_fullres' - nnunet_trainer = get_nnunet_trainer(dataset_name_or_id, configuration, fold) - - trainer = SupervisedTrainer( - device=nnunet_trainer.device, - max_epochs=nnunet_trainer.num_epochs, - train_data_loader=nnunet_trainer.dataloader_train, - network=nnunet_trainer.network, - optimizer=nnunet_trainer.optimizer, - loss_function=nnunet_trainer.loss_function, - epoch_length=nnunet_trainer.num_iterations_per_epoch, - ) - - Parameters - ---------- - dataset_name_or_id : Union[str, int] - The name or ID of the dataset to be used. - configuration : str - The configuration name for the training. - fold : Union[int, str] - The fold number or 'all' for cross-validation. - trainer_class_name : str, optional - The class name of the trainer to be used. Default is 'nnUNetTrainer'. - For a complete list of supported trainers, check: - https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunetv2/training/nnUNetTrainer/variants - plans_identifier : str, optional - Identifier for the plans to be used. Default is 'nnUNetPlans'. - use_compressed_data : bool, optional - Whether to use compressed data. Default is False. - continue_training : bool, optional - Whether to continue training from a checkpoint. Default is False. - only_run_validation : bool, optional - Whether to only run validation. Default is False. - disable_checkpointing : bool, optional - Whether to disable checkpointing. Default is False. 
- device : str, optional - The device to be used for training. Default is 'cuda'. - pretrained_model : Optional[str], optional - Path to the pretrained model file. - - Returns - ------- - nnunet_trainer : object - The nnUNet trainer instance. - """ - # From nnUNet/nnunetv2/run/run_training.py#run_training - if isinstance(fold, str): - if fold != "all": - try: - fold = int(fold) - except ValueError as e: - print( - f'Unable to convert given value for fold to int: {fold}. fold must bei either "all" or an integer!' - ) - raise e - - from nnunetv2.run.run_training import get_trainer_from_args, maybe_load_checkpoint - - nnunet_trainer = get_trainer_from_args( - str(dataset_name_or_id), - configuration, - fold, - trainer_class_name, - plans_identifier, - device=torch.device(device), - ) - if disable_checkpointing: - nnunet_trainer.disable_checkpointing = disable_checkpointing - - assert not (continue_training and only_run_validation), "Cannot set --c and --val flag at the same time. Dummy." - - maybe_load_checkpoint(nnunet_trainer, continue_training, only_run_validation) - nnunet_trainer.on_train_start() # Added to Initialize Trainer - if torch.cuda.is_available(): - cudnn.deterministic = False - cudnn.benchmark = True - - if pretrained_model is not None: - state_dict = torch.load(pretrained_model, weights_only=False) - if "network_weights" in state_dict: - nnunet_trainer.network._orig_mod.load_state_dict(state_dict["network_weights"]) - return nnunet_trainer - - -def get_nnunet_monai_predictor( - model_folder: Union[str, Path], - model_name: str = "model.pt", - dataset_json: dict = None, - plans: dict = None, - nnunet_config: dict = None, - save_probabilities: bool = False, - save_files: bool = False, - use_folds: Optional[Union[int, str]] = None, -) -> ModelnnUNetWrapper: - """ - Initializes and returns a `nnUNetMONAIModelWrapper` containing the corresponding `nnUNetPredictor`. - The model folder should contain the following files, created during training: - - - dataset.json: from the nnUNet results folder - - plans.json: from the nnUNet results folder - - nnunet_checkpoint.pth: The nnUNet checkpoint file, containing the nnUNet training configuration - - model.pt: The checkpoint file containing the model weights. - - The returned wrapper object can be used for inference with MONAI framework: - Example:: - - from monai.bundle.nnunet import get_nnunet_monai_predictor - - model_folder = 'path/to/monai_bundle/model' - model_name = 'model.pt' - wrapper = get_nnunet_monai_predictor(model_folder, model_name) - - # Perform inference - input_data = ... - output = wrapper(input_data) - - - Parameters - ---------- - model_folder : Union[str, Path] - The folder where the model is stored. - model_name : str, optional - The name of the model file, by default "model.pt". - dataset_json : dict, optional - The dataset JSON file containing dataset information. - plans : dict, optional - The plans JSON file containing model configuration. - nnunet_config : dict, optional - The nnUNet configuration dictionary containing model parameters. - - Returns - ------- - ModelnnUNetWrapper - A wrapper object that contains the nnUNetPredictor and the loaded model. 
- """ - - from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor - - predictor = nnUNetPredictor( - tile_step_size=0.5, - use_gaussian=True, - use_mirroring=True, - device=torch.device("cuda", 0), - verbose=True, - verbose_preprocessing=False, - allow_tqdm=True, - ) - # initializes the network architecture, loads the checkpoint - print("nnunet_predictor: Model Folder: ", model_folder) - print("nnunet_predictor: Model name: ", model_name) - print("nnunet_predictor: use_folds: ", use_folds) - wrapper = ModelnnUNetWrapper( - predictor, - model_folder=model_folder, - checkpoint_name=model_name, - dataset_json=dataset_json, - plans=plans, - nnunet_config=nnunet_config, - save_probabilities=save_probabilities, - save_files=save_files, - use_folds=use_folds, - ) - return wrapper - - -def get_nnunet_monai_predictors_for_ensemble( - model_list: list, - model_path: Union[str, Path], - model_name: str = "model.pt", - use_folds: Optional[Union[int, str]] = None, -) -> Tuple[ModelnnUNetWrapper, ...]: - network_list = [] - for model_config in model_list: - model_folder = Path(model_path).joinpath(model_config) - network_list.append( - get_nnunet_monai_predictor( - model_folder=model_folder, - model_name=model_name, - save_probabilities=True, - save_files=True, - use_folds=use_folds, - ) - ) - return tuple(network_list) - - -import os -from typing import Dict, List, Union - -import numpy as np -from nnunetv2.ensembling.ensemble import average_probabilities -from nnunetv2.utilities.label_handling.label_handling import LabelManager -from nnunetv2.utilities.plans_handling.plans_handler import PlansManager - -from monai.config import KeysCollection -from monai.data.meta_tensor import MetaTensor -from monai.transforms import MapTransform - - -class EnsembleProbabilitiesToSegmentation(MapTransform): - """ - MONAI transform that loads .npz probability files from metadata['saved_file'] for a given key, - averages them, and converts to final segmentation using nnU-Net's LabelManager. - Returns a MetaTensor segmentation result (instead of saving to disk). 
- """ - - def __init__( - self, - keys: KeysCollection, - dataset_json_path: str, - plans_json_path: str, - allow_missing_keys: bool = False, - output_key: str = "pred", - ): - super().__init__(keys, allow_missing_keys) - - # Load required nnU-Net configs - self.plans_manager = PlansManager(plans_json_path) - self.dataset_json = self._load_json(dataset_json_path) - self.label_manager = self.plans_manager.get_label_manager(self.dataset_json) - self.output_key = output_key - - def _load_json(self, path: str) -> Dict: - import json - - with open(path, "r") as f: - return json.load(f) - - def __call__(self, data: Dict) -> Dict: - d = dict(data) - all_files = [] - for key in self.keys: - meta = d[key].meta if isinstance(d[key], MetaTensor) else d.get("meta", {}) - saved_file = meta.get("saved_file", None) - - # Support multiple files for ensemble - if isinstance(saved_file, str): - saved_file = [saved_file] - elif not isinstance(saved_file, list): - raise ValueError(f"'saved_file' in meta must be str or List[str], got {type(saved_file)}") - - for f in saved_file: - if not os.path.exists(f): - raise FileNotFoundError(f"Probability file not found: {f}") - all_files.append(f) - - print("All files to average: ", all_files) - # Step 1: average probabilities - avg_probs = average_probabilities(all_files) - - # Step 2: convert to segmentation - segmentation = self.label_manager.convert_logits_to_segmentation(avg_probs) # shape: (H, W, D) - - # Step 3: wrap as MetaTensor and attach meta - seg_tensor = MetaTensor(segmentation[None].astype(np.uint8)) # add channel dim - seg_tensor.meta = dict(meta) - - # Replace the key or store in new key - d[self.output_key] = seg_tensor - return d - - -class ModelnnUNetWrapper(torch.nn.Module): - """ - A wrapper class for nnUNet model integration with MONAI framework. - The wrapper can be use to integrate the nnUNet Bundle within MONAI framework for inference. - - Parameters - ---------- - predictor : nnUNetPredictor - The nnUNet predictor object used for inference. - model_folder : Union[str, Path] - The folder path where the model and related files are stored. - model_name : str, optional - The name of the model file, by default "model.pt". - dataset_json : dict, optional - The dataset JSON file containing dataset information. - plans : dict, optional - The plans JSON file containing model configuration. - nnunet_config : dict, optional - The nnUNet configuration dictionary containing model parameters. - - Attributes - ---------- - predictor : nnUNetPredictor - The nnUNet predictor object used for inference. - network_weights : torch.nn.Module - The network weights of the model. - - Notes - ----- - This class integrates nnUNet model with MONAI framework by loading necessary configurations, - restoring network architecture, and setting up the predictor for inference. - """ - - def __init__( - self, - predictor: object, - model_folder: Union[str, Path], - checkpoint_name: str = None, - dataset_json: dict = None, - plans: dict = None, - nnunet_config: dict = None, - save_probabilities: bool = False, - save_files: bool = False, - tmp_dir: str = "tmp", - use_folds: Union[int, str, Tuple[Union[int, str], ...], List[Union[int, str]]] = None, - ): - - super().__init__() - self.predictor = predictor - - if not checkpoint_name: - raise ValueError("Model name is required. 
Please provide a valid model name.") - - self.tmp_dir = tmp_dir - self.save_probabilities = save_probabilities - self.save_files = save_files - - # Set up model paths - model_training_output_dir = model_folder - model_parent_dir = Path(model_training_output_dir).parent - - # Import required modules - from nnunetv2.utilities.plans_handling.plans_handler import PlansManager - - # Load dataset and plans if not provided - if dataset_json is None: - dataset_json = load_json(join(Path(model_parent_dir), "jsonpkls", DATASET_JSON_FILENAME)) - if plans is None: - plans = load_json(join(Path(model_parent_dir), "jsonpkls", PLANS_JSON_FILENAME)) - - plans_manager = PlansManager(plans) - parameters = [] - - # Get configuration from nnunet_checkpoint.pth or provided config - if nnunet_config is None: - checkpoint_path = join(Path(model_training_output_dir), NNUNET_CHECKPOINT_FILENAME) - if not os.path.exists(checkpoint_path): - raise ValueError( - f"Checkpoint file not found at {checkpoint_path}. Please ensure the model is trained and the checkpoint exists." - ) - - checkpoint = torch.load(checkpoint_path, weights_only=False, map_location=torch.device("cpu")) - trainer_name = checkpoint["trainer_name"] - configuration_name = checkpoint["init_args"]["configuration"] - inference_allowed_mirroring_axes = ( - checkpoint["inference_allowed_mirroring_axes"] - if "inference_allowed_mirroring_axes" in checkpoint.keys() - else None - ) - else: - trainer_name = nnunet_config["trainer_name"] - configuration_name = nnunet_config["configuration"] - inference_allowed_mirroring_axes = nnunet_config["inference_allowed_mirroring_axes"] - - # Store configuration name - self.configuration_name = configuration_name - - # Handle folds - if isinstance(use_folds, str) or isinstance(use_folds, int): - use_folds = [use_folds] - - if use_folds is None: - use_folds = self.predictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) - - # Load model parameters from each fold - for f in use_folds: - f = int(f) if f != "all" else f - fold_checkpoint_path = join(model_training_output_dir, f"fold_{f}", checkpoint_name) - monai_checkpoint = torch.load(fold_checkpoint_path, map_location=torch.device("cpu"), weights_only=False) - - if "network_weights" in monai_checkpoint.keys(): - parameters.append(monai_checkpoint["network_weights"]) - else: - parameters.append(monai_checkpoint) - - # Get configuration manager and setup network - configuration_manager = plans_manager.get_configuration(configuration_name) - - # Import required nnUNet modules - import nnunetv2 - from nnunetv2.utilities.find_class_by_name import recursive_find_python_class - from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels - - # Determine input channels and find trainer class - num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) - trainer_class = recursive_find_python_class( - join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), trainer_name, "nnunetv2.training.nnUNetTrainer" - ) - - if trainer_class is None: - raise RuntimeError(f"Unable to locate trainer class {trainer_name} in nnunetv2.training.nnUNetTrainer.") - - # Build network architecture - network = trainer_class.build_network_architecture( - configuration_manager.network_arch_class_name, - configuration_manager.network_arch_init_kwargs, - configuration_manager.network_arch_init_kwargs_req_import, - num_input_channels, - plans_manager.get_label_manager(dataset_json).num_segmentation_heads, - 
enable_deep_supervision=False, - ) - - # Configure predictor with all required settings - predictor.plans_manager = plans_manager - predictor.configuration_manager = configuration_manager - predictor.list_of_parameters = parameters - predictor.network = network - predictor.dataset_json = dataset_json - predictor.trainer_name = trainer_name - predictor.allowed_mirroring_axes = inference_allowed_mirroring_axes - predictor.label_manager = plans_manager.get_label_manager(dataset_json) - - # Store network weights reference - self.network_weights = self.predictor.network - - def forward(self, x: MetaTensor) -> MetaTensor: - """ - Forward pass for the nnUNet model. - - Args: - x (MetaTensor): Input tensor for inference. - - Returns: - MetaTensor: The output tensor with the same metadata as the input. - - Raises: - TypeError: If the input is not a MetaTensor. - """ - if not isinstance(x, MetaTensor): - raise TypeError("Input must be a MetaTensor.") - - # Extract spatial shape from input - spatial_shape = list(x.shape[-3:]) # [H, W, D] or [X, Y, Z] - - # Get spacing information from metadata - properties_or_list_of_properties = {} - - if "pixdim" in x.meta: - # Get spacing from pixdim - if x.meta["pixdim"].ndim == 1: - properties_or_list_of_properties["spacing"] = x.meta["pixdim"][1:4].tolist() - else: - properties_or_list_of_properties["spacing"] = x.meta["pixdim"][0][1:4].numpy().tolist() - - elif "affine" in x.meta: - # Get spacing from affine matrix - affine = x.meta["affine"][0].cpu().numpy() if x.meta["affine"].ndim == 3 else x.meta["affine"].cpu().numpy() - spacing = np.array( - [ - np.sqrt(np.sum(affine[:3, 0] ** 2)), - np.sqrt(np.sum(affine[:3, 1] ** 2)), - np.sqrt(np.sum(affine[:3, 2] ** 2)), - ] - ) - properties_or_list_of_properties["spacing"] = spacing - else: - # Default spacing if no metadata available - properties_or_list_of_properties["spacing"] = [1.0, 1.0, 1.0] - - # Add spatial shape to properties - properties_or_list_of_properties["spatial_shape"] = spatial_shape - - # Convert input tensor to numpy array - image_or_list_of_images = x.cpu().numpy()[0, :] - - # Setup output file path if saving enabled - outfile = None - if self.save_files: - # Get original filename from metadata - infile = x.meta["filename_or_obj"] - if isinstance(infile, list): - infile = infile[0] - - # Create output path - outfile_name = os.path.basename(infile).split(".")[0] - outfolder = Path(self.tmp_dir).joinpath(self.configuration_name) - os.makedirs(outfolder, exist_ok=True) - outfile = str(Path(outfolder).joinpath(outfile_name)) - - # Extract 4x4 affine matrix for SimpleITK compatibility - if "affine" in x.meta: - # Get affine matrix with proper shape - if x.meta["affine"].shape == (1, 4, 4): - affine = x.meta["affine"][0].cpu().numpy() - elif x.meta["affine"].shape == (4, 4): - affine = x.meta["affine"].cpu().numpy() - else: - raise ValueError(f"Unexpected affine shape: {x.meta['affine'].shape}") - - # Calculate spacing, origin and direction - spacing = tuple(np.linalg.norm(affine[:3, i]) for i in range(3)) - origin = tuple(float(v) for v in affine[:3, 3]) - direction_matrix = affine[:3, :3] / spacing - direction = tuple(direction_matrix.flatten().round(6)) - - # Add to properties dict for SimpleITK - properties_or_list_of_properties["sitk_stuff"] = { - "spacing": spacing, - "origin": origin, - "direction": direction, - } - # Handle cascade models by loading segmentation from previous stage - previous_segmentation = None - if self.configuration_name == "3d_cascade_fullres": - # For cascade models, we 
need the lowres prediction - lowres_predictions_folder = os.path.join(self.tmp_dir, "3d_lowres") - - if outfile: - seg_file = os.path.join(lowres_predictions_folder, outfile_name + ".nii.gz") - # Load the lowres segmentation from file - rw = self.predictor.plans_manager.image_reader_writer_class() - previous_segmentation, _ = rw.read_seg(seg_file) - - if previous_segmentation is None: - raise ValueError("Failed to load previous segmentation for cascade model.") - else: - raise ValueError("Output file name is required for 3d_cascade_fullres configuration.") - - # Run prediction using nnUNet predictor - prediction_output = self.predictor.predict_from_list_of_npy_arrays( - image_or_list_of_images, - previous_segmentation, - properties_or_list_of_properties, - save_probabilities=self.save_probabilities, - truncated_ofname=outfile, - num_processes=2, - num_processes_segmentation_export=2, - ) - - # Process prediction output based on save_files setting - if not self.save_files: - # Return the prediction output directly - out_tensors = [] - for out in prediction_output: - # Add batch and channel dimensions - out_tensors.append(torch.from_numpy(np.expand_dims(np.expand_dims(out, 0), 0))) - # Concatenate along batch dimension - out_tensor = torch.cat(out_tensors, 0) - - return MetaTensor(out_tensor, meta=x.meta) - else: - # Return a placeholder tensor with file path in metadata - saved_path = outfile + ".npz" - if not os.path.exists(saved_path): - raise FileNotFoundError(f"Expected saved file not found: {saved_path}") - - # Create placeholder tensor with same spatial dimensions - shape = properties_or_list_of_properties["spatial_shape"] - dummy_tensor = torch.zeros((1, 1, *shape), dtype=torch.float32) - - # Create metadata with file path - meta_with_filepath = dict(x.meta) - meta_with_filepath["saved_file"] = saved_path - - return MetaTensor(dummy_tensor, meta=meta_with_filepath) +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
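+"""
+Utilities bridging nnU-Net (v2) and MONAI: converting trained nnU-Net checkpoints to a
+MONAI bundle layout (and back), building networks from nnU-Net plans, and wrapping the
+nnU-Net predictor for MONAI-style inference.
+
+A minimal end-to-end sketch (illustrative only: the dataset ID and paths are hypothetical,
+and the ``nnUNet_results`` environment variable must point to a trained results folder)::
+
+    nnunet_config = {"dataset_name_or_id": "009", "nnunet_configuration": "3d_fullres"}
+    convert_nnunet_to_monai_bundle(nnunet_config, "spleen_bundle", fold=0)
+    predictor = get_nnunet_monai_predictor("spleen_bundle/models/3d_fullres", model_name="best_model.pt")
+"""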
+from __future__ import annotations + +import os +import shutil +from pathlib import Path +from typing import Any, Optional, Tuple, Union + +import numpy as np +import torch +from torch.backends import cudnn + +from monai.data.meta_tensor import MetaTensor +from monai.utils import optional_import + +join, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="join") +load_json, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="load_json") + +__all__ = [ + "get_nnunet_trainer", + "get_nnunet_monai_predictor", + "get_network_from_nnunet_plans", + "convert_nnunet_to_monai_bundle", + "convert_monai_bundle_to_nnunet", + "ModelnnUNetWrapper", + "EnsembleProbabilitiesToSegmentation", +] + +# Constants +NNUNET_CHECKPOINT_FILENAME = "nnunet_checkpoint.pth" +PLANS_JSON_FILENAME = "plans.json" +DATASET_JSON_FILENAME = "dataset.json" + + +# Convert a single nnUNet model checkpoint to MONAI bundle format +# The function saves the converted model checkpoint and configuration files in the specified bundle root folder. +def convert_nnunet_to_monai_bundle(nnunet_config: dict, bundle_root_folder: str, fold: int = 0) -> None: + """ + Convert nnUNet model checkpoints and configuration to MONAI bundle format. + + Parameters + ---------- + nnunet_config : dict + Configuration dictionary for nnUNet, containing keys such as 'dataset_name_or_id', 'nnunet_configuration', + 'nnunet_trainer', and 'nnunet_plans'. + bundle_root_folder : str + Root folder where the MONAI bundle will be saved. + fold : int, optional + Fold number of the nnUNet model to be converted, by default 0. + + Returns + ------- + None + """ + + nnunet_trainer = "nnUNetTrainer" + nnunet_plans = "nnUNetPlans" + nnunet_configuration = "3d_fullres" + + if "nnunet_trainer" in nnunet_config: + nnunet_trainer = nnunet_config["nnunet_trainer"] + + if "nnunet_plans" in nnunet_config: + nnunet_plans = nnunet_config["nnunet_plans"] + + if "nnunet_configuration" in nnunet_config: + nnunet_configuration = nnunet_config["nnunet_configuration"] + + from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + dataset_name = maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"]) + nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath( + dataset_name, f"{nnunet_trainer}__{nnunet_plans}__{nnunet_configuration}" + ) + + nnunet_checkpoint_final = torch.load( + Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_final.pth"), weights_only=False + ) + nnunet_checkpoint_best = torch.load( + Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_best.pth"), weights_only=False + ) + + nnunet_checkpoint = {} + nnunet_checkpoint["inference_allowed_mirroring_axes"] = nnunet_checkpoint_final["inference_allowed_mirroring_axes"] + nnunet_checkpoint["init_args"] = nnunet_checkpoint_final["init_args"] + nnunet_checkpoint["trainer_name"] = nnunet_checkpoint_final["trainer_name"] + + Path(bundle_root_folder).joinpath("models", nnunet_configuration).mkdir(parents=True, exist_ok=True) + + torch.save( + nnunet_checkpoint, Path(bundle_root_folder).joinpath("models", nnunet_configuration, NNUNET_CHECKPOINT_FILENAME) + ) + + Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}").mkdir(parents=True, exist_ok=True) + # This might not be needed, comment it out for now + # monai_last_checkpoint = {} + # monai_last_checkpoint["network_weights"] = nnunet_checkpoint_final["network_weights"] + # torch.save( + # monai_last_checkpoint, + # 
Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}", "model.pt")
+    # )
+
+    monai_best_checkpoint = {}
+    monai_best_checkpoint["network_weights"] = nnunet_checkpoint_best["network_weights"]
+    torch.save(
+        monai_best_checkpoint,
+        Path(bundle_root_folder).joinpath("models", nnunet_configuration, f"fold_{fold}", "best_model.pt"),
+    )
+
+    # Ensure the shared jsonpkls folder exists before copying the JSON files into it
+    Path(bundle_root_folder).joinpath("models", "jsonpkls").mkdir(parents=True, exist_ok=True)
+
+    if not os.path.exists(os.path.join(bundle_root_folder, "models", "jsonpkls", PLANS_JSON_FILENAME)):
+        shutil.copy(
+            Path(nnunet_model_folder).joinpath(PLANS_JSON_FILENAME),
+            Path(bundle_root_folder).joinpath("models", "jsonpkls", PLANS_JSON_FILENAME),
+        )
+
+    if not os.path.exists(os.path.join(bundle_root_folder, "models", "jsonpkls", DATASET_JSON_FILENAME)):
+        shutil.copy(
+            Path(nnunet_model_folder).joinpath(DATASET_JSON_FILENAME),
+            Path(bundle_root_folder).joinpath("models", "jsonpkls", DATASET_JSON_FILENAME),
+        )
+
+
+# Convert all nnUNet models (configurations and folds) listed in the inference information file
+# to MONAI bundle format, iterating through each selected configuration and fold.
+# The folds, configurations, plans and dataset.json are parsed from the nnUNet results folder.
+def convert_best_nnunet_to_monai_bundle(
+    nnunet_config: dict, bundle_root_folder: str, inference_info_file: str = "inference_information.json"
+) -> None:
+    """
+    Convert all nnUNet models (configs and folds) to MONAI bundle format.
+
+    Parameters
+    ----------
+    nnunet_config : dict
+        Configuration dictionary for nnUNet. Expected keys are:
+        - "dataset_name_or_id": str, name or ID of the dataset.
+        - "nnunet_configuration": str, configuration name.
+        - "nnunet_trainer": str, optional, name of the nnU-Net trainer (default is "nnUNetTrainer").
+        - "nnunet_plans": str, optional, name of the nnU-Net plans (default is "nnUNetPlans").
+    bundle_root_folder : str
+        Path to the root folder of the MONAI bundle.
+    inference_info_file : str, optional
+        Name of the inference information file, located in the dataset's nnUNet results folder (default is "inference_information.json").
+
+    Returns
+    -------
+    None
+    """
+    from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
+
+    dataset_name = nnunet_config["dataset_name_or_id"]
+
+    inference_info_path = Path(os.environ["nnUNet_results"]).joinpath(
+        maybe_convert_to_dataset_name(dataset_name), inference_info_file
+    )
+
+    if not os.path.exists(inference_info_path):
+        raise FileNotFoundError(f"Inference information file not found: {inference_info_path}")
+    inference_info = load_json(inference_info_path)
+
+    # Get the best model or ensemble from the inference information
+    if "best_model_or_ensemble" not in inference_info:
+        raise KeyError(f"Key 'best_model_or_ensemble' not found in inference information file: {inference_info_path}")
+    best_model_dict = inference_info["best_model_or_ensemble"]
+
+    # Get the folds information
+    if "folds" not in inference_info:
+        raise KeyError(f"Key 'folds' not found in inference information file: {inference_info_path}")
+    folds = inference_info["folds"]  # list of folds
+
+    cascade_3d_fullres = False
+    for model_dict in best_model_dict["selected_model_or_models"]:
+        if model_dict["configuration"] == "3d_cascade_fullres":
+            cascade_3d_fullres = True
+
+        print("Converting model: ", model_dict["configuration"])
+        nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath(
+            maybe_convert_to_dataset_name(dataset_name),
+            f"{model_dict['trainer']}__{model_dict['plans_identifier']}__{model_dict['configuration']}",
+        )
+        nnunet_config["nnunet_configuration"] = model_dict["configuration"]
+        nnunet_config["nnunet_trainer"] = model_dict["trainer"]
+        nnunet_config["nnunet_plans"] = model_dict["plans_identifier"]
+
+        if not os.path.exists(nnunet_model_folder):
+            raise FileNotFoundError(f"Model folder not found: {nnunet_model_folder}")
+
+        for fold in folds:
+            print("Converting fold: ", fold, " of model: ", model_dict["configuration"])
+            convert_nnunet_to_monai_bundle(nnunet_config, bundle_root_folder, fold)
+
+    # If the selected model is a cascade model, the 3d_lowres stage is also needed
+    if cascade_3d_fullres:
+        # Check whether 3d_lowres is already in the bundle
+        if not os.path.exists(os.path.join(bundle_root_folder, "models", "3d_lowres")):
+            # Convert the 3d_lowres model from the nnUNet results into the bundle
+            nnunet_model_folder = Path(os.environ["nnUNet_results"]).joinpath(
+                maybe_convert_to_dataset_name(dataset_name),
+                f"{model_dict['trainer']}__{model_dict['plans_identifier']}__3d_lowres",
+            )
+            if not os.path.exists(nnunet_model_folder):
+                raise FileNotFoundError(f"Model folder not found: {nnunet_model_folder}")
+            nnunet_config["nnunet_configuration"] = "3d_lowres"
+            nnunet_config["nnunet_trainer"] = best_model_dict["selected_model_or_models"][-1][
+                "trainer"
+            ]  # Using the same trainer as the cascade model
+            nnunet_config["nnunet_plans"] = best_model_dict["selected_model_or_models"][-1][
+                "plans_identifier"
+            ]  # Using the same plans identifier as the cascade model
+            for fold in folds:
+                print("Converting fold: ", fold, " of model: ", "3d_lowres")
+                convert_nnunet_to_monai_bundle(nnunet_config, bundle_root_folder, fold)
+
+    # Finally, copy the postprocessing file if one is needed (for ensemble models)
+    if "postprocessing_file" in best_model_dict:
+        postprocessing_file_path = best_model_dict["postprocessing_file"]
+        if not os.path.exists(postprocessing_file_path):
+            raise FileNotFoundError(f"Postprocessing file not found: {postprocessing_file_path}")
+        shutil.copy(postprocessing_file_path, Path(bundle_root_folder).joinpath("models", 
"postprocessing.pkl")) + + +def convert_monai_bundle_to_nnunet(nnunet_config: dict, bundle_root_folder: str, fold: int = 0) -> None: + """ + Convert a MONAI bundle to nnU-Net format. + + Parameters + ---------- + nnunet_config : dict + Configuration dictionary for nnU-Net. Expected keys are: + - "dataset_name_or_id": str, name or ID of the dataset. + - "nnunet_trainer": str, optional, name of the nnU-Net trainer (default is "nnUNetTrainer"). + - "nnunet_plans": str, optional, name of the nnU-Net plans (default is "nnUNetPlans"). + bundle_root_folder : str + Path to the root folder of the MONAI bundle. + fold : int, optional + Fold number for cross-validation (default is 0). + + Returns + ------- + None + """ + from odict import odict + + nnunet_trainer: str = "nnUNetTrainer" + nnunet_plans: str = "nnUNetPlans" + + if "nnunet_trainer" in nnunet_config: + nnunet_trainer = nnunet_config["nnunet_trainer"] + + if "nnunet_plans" in nnunet_config: + nnunet_plans = nnunet_config["nnunet_plans"] + + from nnunetv2.training.logging.nnunet_logger import nnUNetLogger + from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + def subfiles( + folder: Union[str, Path], prefix: Optional[str] = None, suffix: Optional[str] = None, sort: bool = True + ) -> list[str]: + res = [ + i.name + for i in Path(folder).iterdir() + if i.is_file() + and (prefix is None or i.name.startswith(prefix)) + and (suffix is None or i.name.endswith(suffix)) + ] + if sort: + res.sort() + return res + + nnunet_model_folder: Path = Path(os.environ["nnUNet_results"]).joinpath( + maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"]), + f"{nnunet_trainer}__{nnunet_plans}__3d_fullres", + ) + + nnunet_preprocess_model_folder: Path = Path(os.environ["nnUNet_preprocessed"]).joinpath( + maybe_convert_to_dataset_name(nnunet_config["dataset_name_or_id"]) + ) + + Path(nnunet_model_folder).joinpath(f"fold_{fold}").mkdir(parents=True, exist_ok=True) + + nnunet_checkpoint: dict = torch.load( + f"{bundle_root_folder}/models/{NNUNET_CHECKPOINT_FILENAME}", weights_only=False + ) + latest_checkpoints: list[str] = subfiles( + Path(bundle_root_folder).joinpath("models", f"fold_{fold}"), prefix="checkpoint_epoch", sort=True + ) + epochs: list[int] = [] + for latest_checkpoint in latest_checkpoints: + epochs.append(int(latest_checkpoint[len("checkpoint_epoch=") : -len(".pt")])) + + epochs.sort() + final_epoch: int = epochs[-1] + monai_last_checkpoint: dict = torch.load( + f"{bundle_root_folder}/models/fold_{fold}/checkpoint_epoch={final_epoch}.pt", weights_only=False + ) + + best_checkpoints: list[str] = subfiles( + Path(bundle_root_folder).joinpath("models", f"fold_{fold}"), prefix="checkpoint_key_metric", sort=True + ) + key_metrics: list[str] = [] + for best_checkpoint in best_checkpoints: + key_metrics.append(str(best_checkpoint[len("checkpoint_key_metric=") : -len(".pt")])) + + key_metrics.sort() + best_key_metric: str = key_metrics[-1] + monai_best_checkpoint: dict = torch.load( + f"{bundle_root_folder}/models/fold_{fold}/checkpoint_key_metric={best_key_metric}.pt", weights_only=False + ) + + if "optimizer_state" in monai_last_checkpoint: + nnunet_checkpoint["optimizer_state"] = monai_last_checkpoint["optimizer_state"] + + nnunet_checkpoint["network_weights"] = odict() + + for key in monai_last_checkpoint["network_weights"]: + nnunet_checkpoint["network_weights"][key] = monai_last_checkpoint["network_weights"][key] + + nnunet_checkpoint["current_epoch"] = final_epoch + nnunet_checkpoint["logging"] = 
nnUNetLogger().get_checkpoint() + nnunet_checkpoint["_best_ema"] = 0 + nnunet_checkpoint["grad_scaler_state"] = None + + torch.save(nnunet_checkpoint, Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_final.pth")) + + nnunet_checkpoint["network_weights"] = odict() + + if "optimizer_state" in monai_last_checkpoint: + nnunet_checkpoint["optimizer_state"] = monai_best_checkpoint["optimizer_state"] + + for key in monai_best_checkpoint["network_weights"]: + nnunet_checkpoint["network_weights"][key] = monai_best_checkpoint["network_weights"][key] + + torch.save(nnunet_checkpoint, Path(nnunet_model_folder).joinpath(f"fold_{fold}", "checkpoint_best.pth")) + + if not os.path.exists(os.path.join(nnunet_model_folder, DATASET_JSON_FILENAME)): + shutil.copy(f"{bundle_root_folder}/models/jsonpkls/{DATASET_JSON_FILENAME}", nnunet_model_folder) + if not os.path.exists(os.path.join(nnunet_model_folder, PLANS_JSON_FILENAME)): + shutil.copy(f"{bundle_root_folder}/models/jsonpkls/{PLANS_JSON_FILENAME}", nnunet_model_folder) + if not os.path.exists(os.path.join(nnunet_model_folder, "dataset_fingerprint.json")): + shutil.copy(f"{nnunet_preprocess_model_folder}/dataset_fingerprint.json", nnunet_model_folder) + if not os.path.exists(os.path.join(nnunet_model_folder, NNUNET_CHECKPOINT_FILENAME)): + shutil.copy(f"{bundle_root_folder}/models/{NNUNET_CHECKPOINT_FILENAME}", nnunet_model_folder) + + +# This function loads a nnUNet network from the provided plans and dataset files. +# It initializes the network architecture and loads the model weights if a checkpoint is provided. +def get_network_from_nnunet_plans( + plans_file: str, + dataset_file: str, + configuration: str, + model_ckpt: Optional[str] = None, + model_key_in_ckpt: str = "model", +) -> Union[torch.nn.Module, Any]: + """ + Load and initialize a nnUNet network based on nnUNet plans and configuration. + + Parameters + ---------- + plans_file : str + Path to the JSON file containing the nnUNet plans. + dataset_file : str + Path to the JSON file containing the dataset information. + configuration : str + The configuration name to be used from the plans. + model_ckpt : Optional[str], optional + Path to the model checkpoint file. If None, the network is returned without loading weights (default is None). + model_key_in_ckpt : str, optional + The key in the checkpoint file that contains the model state dictionary (default is "model"). + + Returns + ------- + network : torch.nn.Module + The initialized neural network, with weights loaded if `model_ckpt` is provided. 
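+
+    Example (an illustrative sketch; the paths assume the bundle layout produced by
+    ``convert_nnunet_to_monai_bundle`` and are hypothetical)::
+
+        network = get_network_from_nnunet_plans(
+            plans_file="bundle_root/models/jsonpkls/plans.json",
+            dataset_file="bundle_root/models/jsonpkls/dataset.json",
+            configuration="3d_fullres",
+        )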
+ """ + from batchgenerators.utilities.file_and_folder_operations import load_json + from nnunetv2.utilities.get_network_from_plans import get_network_from_plans + from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels + from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + + plans = load_json(plans_file) + dataset_json = load_json(dataset_file) + + plans_manager = PlansManager(plans) + configuration_manager = plans_manager.get_configuration(configuration) + num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) + label_manager = plans_manager.get_label_manager(dataset_json) + + enable_deep_supervision = True + + network = get_network_from_plans( + configuration_manager.network_arch_class_name, + configuration_manager.network_arch_init_kwargs, + configuration_manager.network_arch_init_kwargs_req_import, + num_input_channels, + label_manager.num_segmentation_heads, + allow_init=True, + deep_supervision=enable_deep_supervision, + ) + + if model_ckpt is None: + return network + else: + state_dict = torch.load(model_ckpt, weights_only=False) + network.load_state_dict(state_dict[model_key_in_ckpt]) + return network + + +def get_nnunet_trainer( + dataset_name_or_id: Union[str, int], + configuration: str, + fold: Union[int, str], + trainer_class_name: str = "nnUNetTrainer", + plans_identifier: str = "nnUNetPlans", + use_compressed_data: bool = False, + continue_training: bool = False, + only_run_validation: bool = False, + disable_checkpointing: bool = False, + device: str = "cuda", + pretrained_model: Optional[str] = None, +) -> Any: # type: ignore + """ + Get the nnUNet trainer instance based on the provided configuration. + The returned nnUNet trainer can be used to initialize the SupervisedTrainer for training, including the network, + optimizer, loss function, DataLoader, etc. + + Example:: + + from monai.apps import SupervisedTrainer + from monai.bundle.nnunet import get_nnunet_trainer + + dataset_name_or_id = 'Task009_Spleen' + fold = 0 + configuration = '3d_fullres' + nnunet_trainer = get_nnunet_trainer(dataset_name_or_id, configuration, fold) + + trainer = SupervisedTrainer( + device=nnunet_trainer.device, + max_epochs=nnunet_trainer.num_epochs, + train_data_loader=nnunet_trainer.dataloader_train, + network=nnunet_trainer.network, + optimizer=nnunet_trainer.optimizer, + loss_function=nnunet_trainer.loss_function, + epoch_length=nnunet_trainer.num_iterations_per_epoch, + ) + + Parameters + ---------- + dataset_name_or_id : Union[str, int] + The name or ID of the dataset to be used. + configuration : str + The configuration name for the training. + fold : Union[int, str] + The fold number or 'all' for cross-validation. + trainer_class_name : str, optional + The class name of the trainer to be used. Default is 'nnUNetTrainer'. + For a complete list of supported trainers, check: + https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunetv2/training/nnUNetTrainer/variants + plans_identifier : str, optional + Identifier for the plans to be used. Default is 'nnUNetPlans'. + use_compressed_data : bool, optional + Whether to use compressed data. Default is False. + continue_training : bool, optional + Whether to continue training from a checkpoint. Default is False. + only_run_validation : bool, optional + Whether to only run validation. Default is False. + disable_checkpointing : bool, optional + Whether to disable checkpointing. Default is False. 
+    device : str, optional
+        The device to be used for training. Default is 'cuda'.
+    pretrained_model : Optional[str], optional
+        Path to the pretrained model file.
+
+    Returns
+    -------
+    nnunet_trainer : object
+        The nnUNet trainer instance.
+    """
+    # From nnUNet/nnunetv2/run/run_training.py#run_training
+    if isinstance(fold, str):
+        if fold != "all":
+            try:
+                fold = int(fold)
+            except ValueError as e:
+                print(
+                    f'Unable to convert given value for fold to int: {fold}. fold must be either "all" or an integer!'
+                )
+                raise e
+
+    from nnunetv2.run.run_training import get_trainer_from_args, maybe_load_checkpoint
+
+    nnunet_trainer = get_trainer_from_args(
+        str(dataset_name_or_id),
+        configuration,
+        fold,
+        trainer_class_name,
+        plans_identifier,
+        device=torch.device(device),
+    )
+    if disable_checkpointing:
+        nnunet_trainer.disable_checkpointing = disable_checkpointing
+
+    assert not (continue_training and only_run_validation), "Cannot set --c and --val flag at the same time. Dummy."
+
+    maybe_load_checkpoint(nnunet_trainer, continue_training, only_run_validation)
+    nnunet_trainer.on_train_start()  # Added to initialize the trainer
+    if torch.cuda.is_available():
+        cudnn.deterministic = False
+        cudnn.benchmark = True
+
+    if pretrained_model is not None:
+        state_dict = torch.load(pretrained_model, weights_only=False)
+        if "network_weights" in state_dict:
+            nnunet_trainer.network._orig_mod.load_state_dict(state_dict["network_weights"])
+    return nnunet_trainer
+
+
+def get_nnunet_monai_predictor(
+    model_folder: Union[str, Path],
+    model_name: str = "model.pt",
+    dataset_json: dict = None,
+    plans: dict = None,
+    nnunet_config: dict = None,
+    save_probabilities: bool = False,
+    save_files: bool = False,
+    use_folds: Optional[Union[int, str]] = None,
+) -> ModelnnUNetWrapper:
+    """
+    Initializes and returns a `ModelnnUNetWrapper` containing the corresponding `nnUNetPredictor`.
+    The bundle layout produced by `convert_nnunet_to_monai_bundle` is expected, i.e. the following
+    files, created during training and bundle conversion:
+
+    - ../jsonpkls/dataset.json: dataset information, from the nnUNet results folder
+    - ../jsonpkls/plans.json: the nnUNet plans, from the nnUNet results folder
+    - nnunet_checkpoint.pth: the nnUNet checkpoint file, containing the nnUNet training configuration
+    - fold_X/model.pt: the per-fold checkpoint file(s) containing the model weights.
+
+    The returned wrapper object can be used for inference with the MONAI framework:
+    Example::
+
+        from monai.bundle.nnunet import get_nnunet_monai_predictor
+
+        model_folder = 'path/to/monai_bundle/model'
+        model_name = 'model.pt'
+        wrapper = get_nnunet_monai_predictor(model_folder, model_name)
+
+        # Perform inference
+        input_data = ...
+        output = wrapper(input_data)
+
+
+    Parameters
+    ----------
+    model_folder : Union[str, Path]
+        The folder where the model is stored.
+    model_name : str, optional
+        The name of the model file, by default "model.pt".
+    dataset_json : dict, optional
+        The dataset JSON file containing dataset information.
+    plans : dict, optional
+        The plans JSON file containing model configuration.
+    nnunet_config : dict, optional
+        The nnUNet configuration dictionary containing model parameters.
+
+    Returns
+    -------
+    ModelnnUNetWrapper
+        A wrapper object that contains the nnUNetPredictor and the loaded model.
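+
+    Notes
+    -----
+    ``save_probabilities``, ``save_files`` and ``use_folds`` are forwarded to
+    :class:`ModelnnUNetWrapper`: ``save_probabilities`` additionally exports the predicted
+    probabilities, ``save_files`` writes predictions to a temporary folder and returns a
+    placeholder tensor whose metadata carries the saved ``.npz`` path under ``"saved_file"``,
+    and ``use_folds`` selects which fold checkpoint(s) to load (auto-detected when None).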
+ """ + + from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor + + predictor = nnUNetPredictor( + tile_step_size=0.5, + use_gaussian=True, + use_mirroring=True, + device=torch.device("cuda", 0), + verbose=True, + verbose_preprocessing=False, + allow_tqdm=True, + ) + # initializes the network architecture, loads the checkpoint + print("nnunet_predictor: Model Folder: ", model_folder) + print("nnunet_predictor: Model name: ", model_name) + print("nnunet_predictor: use_folds: ", use_folds) + wrapper = ModelnnUNetWrapper( + predictor, + model_folder=model_folder, + checkpoint_name=model_name, + dataset_json=dataset_json, + plans=plans, + nnunet_config=nnunet_config, + save_probabilities=save_probabilities, + save_files=save_files, + use_folds=use_folds, + ) + return wrapper + + +def get_nnunet_monai_predictors_for_ensemble( + model_list: list, + model_path: Union[str, Path], + model_name: str = "model.pt", + use_folds: Optional[Union[int, str]] = None, +) -> Tuple[ModelnnUNetWrapper, ...]: + network_list = [] + for model_config in model_list: + model_folder = Path(model_path).joinpath(model_config) + network_list.append( + get_nnunet_monai_predictor( + model_folder=model_folder, + model_name=model_name, + save_probabilities=True, + save_files=True, + use_folds=use_folds, + ) + ) + return tuple(network_list) + + +from typing import Dict, List + +from nnunetv2.ensembling.ensemble import average_probabilities +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + +from monai.config import KeysCollection +from monai.transforms import MapTransform + + +class EnsembleProbabilitiesToSegmentation(MapTransform): + """ + MONAI transform that loads .npz probability files from metadata['saved_file'] for a given key, + averages them, and converts to final segmentation using nnU-Net's LabelManager. + Returns a MetaTensor segmentation result (instead of saving to disk). 
+ """ + + def __init__( + self, + keys: KeysCollection, + dataset_json_path: str, + plans_json_path: str, + allow_missing_keys: bool = False, + output_key: str = "pred", + ): + super().__init__(keys, allow_missing_keys) + + # Load required nnU-Net configs + self.plans_manager = PlansManager(plans_json_path) + self.dataset_json = self._load_json(dataset_json_path) + self.label_manager = self.plans_manager.get_label_manager(self.dataset_json) + self.output_key = output_key + + def _load_json(self, path: str) -> Dict: + import json + + with open(path, "r") as f: + return json.load(f) + + def __call__(self, data: Dict) -> Dict: + d = dict(data) + all_files = [] + for key in self.keys: + meta = d[key].meta if isinstance(d[key], MetaTensor) else d.get("meta", {}) + saved_file = meta.get("saved_file", None) + + # Support multiple files for ensemble + if isinstance(saved_file, str): + saved_file = [saved_file] + elif not isinstance(saved_file, list): + raise ValueError(f"'saved_file' in meta must be str or List[str], got {type(saved_file)}") + + for f in saved_file: + if not os.path.exists(f): + raise FileNotFoundError(f"Probability file not found: {f}") + all_files.append(f) + + print("All files to average: ", all_files) + # Step 1: average probabilities + avg_probs = average_probabilities(all_files) + + # Step 2: convert to segmentation + segmentation = self.label_manager.convert_logits_to_segmentation(avg_probs) # shape: (H, W, D) + + # Step 3: wrap as MetaTensor and attach meta + seg_tensor = MetaTensor(segmentation[None].astype(np.uint8)) # add channel dim + seg_tensor.meta = dict(meta) + + # Replace the key or store in new key + d[self.output_key] = seg_tensor + return d + + +class ModelnnUNetWrapper(torch.nn.Module): + """ + A wrapper class for nnUNet model integration with MONAI framework. + The wrapper can be use to integrate the nnUNet Bundle within MONAI framework for inference. + + Parameters + ---------- + predictor : nnUNetPredictor + The nnUNet predictor object used for inference. + model_folder : Union[str, Path] + The folder path where the model and related files are stored. + model_name : str, optional + The name of the model file, by default "model.pt". + dataset_json : dict, optional + The dataset JSON file containing dataset information. + plans : dict, optional + The plans JSON file containing model configuration. + nnunet_config : dict, optional + The nnUNet configuration dictionary containing model parameters. + + Attributes + ---------- + predictor : nnUNetPredictor + The nnUNet predictor object used for inference. + network_weights : torch.nn.Module + The network weights of the model. + + Notes + ----- + This class integrates nnUNet model with MONAI framework by loading necessary configurations, + restoring network architecture, and setting up the predictor for inference. + """ + + def __init__( + self, + predictor: object, + model_folder: Union[str, Path], + checkpoint_name: str = None, + dataset_json: dict = None, + plans: dict = None, + nnunet_config: dict = None, + save_probabilities: bool = False, + save_files: bool = False, + tmp_dir: str = "tmp", + use_folds: Union[int, str, Tuple[Union[int, str], ...], List[Union[int, str]]] = None, + ): + + super().__init__() + self.predictor = predictor + + if not checkpoint_name: + raise ValueError("Model name is required. 
Please provide a valid model name.") + + self.tmp_dir = tmp_dir + self.save_probabilities = save_probabilities + self.save_files = save_files + + # Set up model paths + model_training_output_dir = model_folder + model_parent_dir = Path(model_training_output_dir).parent + + # Import required modules + from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + + # Load dataset and plans if not provided + if dataset_json is None: + dataset_json = load_json(join(Path(model_parent_dir), "jsonpkls", DATASET_JSON_FILENAME)) + if plans is None: + plans = load_json(join(Path(model_parent_dir), "jsonpkls", PLANS_JSON_FILENAME)) + + plans_manager = PlansManager(plans) + parameters = [] + + # Get configuration from nnunet_checkpoint.pth or provided config + if nnunet_config is None: + checkpoint_path = join(Path(model_training_output_dir), NNUNET_CHECKPOINT_FILENAME) + if not os.path.exists(checkpoint_path): + raise ValueError( + f"Checkpoint file not found at {checkpoint_path}. Please ensure the model is trained and the checkpoint exists." + ) + + checkpoint = torch.load(checkpoint_path, weights_only=False, map_location=torch.device("cpu")) + trainer_name = checkpoint["trainer_name"] + configuration_name = checkpoint["init_args"]["configuration"] + inference_allowed_mirroring_axes = ( + checkpoint["inference_allowed_mirroring_axes"] + if "inference_allowed_mirroring_axes" in checkpoint.keys() + else None + ) + else: + trainer_name = nnunet_config["trainer_name"] + configuration_name = nnunet_config["configuration"] + inference_allowed_mirroring_axes = nnunet_config["inference_allowed_mirroring_axes"] + + # Store configuration name + self.configuration_name = configuration_name + + # Handle folds + if isinstance(use_folds, str) or isinstance(use_folds, int): + use_folds = [use_folds] + + if use_folds is None: + use_folds = self.predictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) + + # Load model parameters from each fold + for f in use_folds: + f = int(f) if f != "all" else f + fold_checkpoint_path = join(model_training_output_dir, f"fold_{f}", checkpoint_name) + monai_checkpoint = torch.load(fold_checkpoint_path, map_location=torch.device("cpu"), weights_only=False) + + if "network_weights" in monai_checkpoint.keys(): + parameters.append(monai_checkpoint["network_weights"]) + else: + parameters.append(monai_checkpoint) + + # Get configuration manager and setup network + configuration_manager = plans_manager.get_configuration(configuration_name) + + # Import required nnUNet modules + import nnunetv2 + from nnunetv2.utilities.find_class_by_name import recursive_find_python_class + from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels + + # Determine input channels and find trainer class + num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) + trainer_class = recursive_find_python_class( + join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), trainer_name, "nnunetv2.training.nnUNetTrainer" + ) + + if trainer_class is None: + raise RuntimeError(f"Unable to locate trainer class {trainer_name} in nnunetv2.training.nnUNetTrainer.") + + # Build network architecture + network = trainer_class.build_network_architecture( + configuration_manager.network_arch_class_name, + configuration_manager.network_arch_init_kwargs, + configuration_manager.network_arch_init_kwargs_req_import, + num_input_channels, + plans_manager.get_label_manager(dataset_json).num_segmentation_heads, + 
enable_deep_supervision=False, + ) + + # Configure predictor with all required settings + predictor.plans_manager = plans_manager + predictor.configuration_manager = configuration_manager + predictor.list_of_parameters = parameters + predictor.network = network + predictor.dataset_json = dataset_json + predictor.trainer_name = trainer_name + predictor.allowed_mirroring_axes = inference_allowed_mirroring_axes + predictor.label_manager = plans_manager.get_label_manager(dataset_json) + + # Store network weights reference + self.network_weights = self.predictor.network + + def forward(self, x: MetaTensor) -> MetaTensor: + """ + Forward pass for the nnUNet model. + + Args: + x (MetaTensor): Input tensor for inference. + + Returns: + MetaTensor: The output tensor with the same metadata as the input. + + Raises: + TypeError: If the input is not a MetaTensor. + """ + if not isinstance(x, MetaTensor): + raise TypeError("Input must be a MetaTensor.") + + # Extract spatial shape from input + spatial_shape = list(x.shape[-3:]) # [H, W, D] or [X, Y, Z] + + # Get spacing information from metadata + properties_or_list_of_properties = {} + + if "pixdim" in x.meta: + # Get spacing from pixdim + if x.meta["pixdim"].ndim == 1: + properties_or_list_of_properties["spacing"] = x.meta["pixdim"][1:4].tolist() + else: + properties_or_list_of_properties["spacing"] = x.meta["pixdim"][0][1:4].numpy().tolist() + + elif "affine" in x.meta: + # Get spacing from affine matrix + affine = x.meta["affine"][0].cpu().numpy() if x.meta["affine"].ndim == 3 else x.meta["affine"].cpu().numpy() + spacing = np.array( + [ + np.sqrt(np.sum(affine[:3, 0] ** 2)), + np.sqrt(np.sum(affine[:3, 1] ** 2)), + np.sqrt(np.sum(affine[:3, 2] ** 2)), + ] + ) + properties_or_list_of_properties["spacing"] = spacing + else: + # Default spacing if no metadata available + properties_or_list_of_properties["spacing"] = [1.0, 1.0, 1.0] + + # Add spatial shape to properties + properties_or_list_of_properties["spatial_shape"] = spatial_shape + + # Convert input tensor to numpy array + image_or_list_of_images = x.cpu().numpy()[0, :] + + # Setup output file path if saving enabled + outfile = None + if self.save_files: + # Get original filename from metadata + infile = x.meta["filename_or_obj"] + if isinstance(infile, list): + infile = infile[0] + + # Create output path + outfile_name = os.path.basename(infile).split(".")[0] + outfolder = Path(self.tmp_dir).joinpath(self.configuration_name) + os.makedirs(outfolder, exist_ok=True) + outfile = str(Path(outfolder).joinpath(outfile_name)) + + # Extract 4x4 affine matrix for SimpleITK compatibility + if "affine" in x.meta: + # Get affine matrix with proper shape + if x.meta["affine"].shape == (1, 4, 4): + affine = x.meta["affine"][0].cpu().numpy() + elif x.meta["affine"].shape == (4, 4): + affine = x.meta["affine"].cpu().numpy() + else: + raise ValueError(f"Unexpected affine shape: {x.meta['affine'].shape}") + + # Calculate spacing, origin and direction + spacing = tuple(np.linalg.norm(affine[:3, i]) for i in range(3)) + origin = tuple(float(v) for v in affine[:3, 3]) + direction_matrix = affine[:3, :3] / spacing + direction = tuple(direction_matrix.flatten().round(6)) + + # Add to properties dict for SimpleITK + properties_or_list_of_properties["sitk_stuff"] = { + "spacing": spacing, + "origin": origin, + "direction": direction, + } + # Handle cascade models by loading segmentation from previous stage + previous_segmentation = None + if self.configuration_name == "3d_cascade_fullres": + # For cascade models, we 
need the lowres prediction + lowres_predictions_folder = os.path.join(self.tmp_dir, "3d_lowres") + + if outfile: + seg_file = os.path.join(lowres_predictions_folder, outfile_name + ".nii.gz") + # Load the lowres segmentation from file + rw = self.predictor.plans_manager.image_reader_writer_class() + previous_segmentation, _ = rw.read_seg(seg_file) + + if previous_segmentation is None: + raise ValueError("Failed to load previous segmentation for cascade model.") + else: + raise ValueError("Output file name is required for 3d_cascade_fullres configuration.") + + # Run prediction using nnUNet predictor + prediction_output = self.predictor.predict_from_list_of_npy_arrays( + image_or_list_of_images, + previous_segmentation, + properties_or_list_of_properties, + save_probabilities=self.save_probabilities, + truncated_ofname=outfile, + num_processes=2, + num_processes_segmentation_export=2, + ) + + # Process prediction output based on save_files setting + if not self.save_files: + # Return the prediction output directly + out_tensors = [] + for out in prediction_output: + # Add batch and channel dimensions + out_tensors.append(torch.from_numpy(np.expand_dims(np.expand_dims(out, 0), 0))) + # Concatenate along batch dimension + out_tensor = torch.cat(out_tensors, 0) + + return MetaTensor(out_tensor, meta=x.meta) + else: + # Return a placeholder tensor with file path in metadata + saved_path = outfile + ".npz" + if not os.path.exists(saved_path): + raise FileNotFoundError(f"Expected saved file not found: {saved_path}") + + # Create placeholder tensor with same spatial dimensions + shape = properties_or_list_of_properties["spatial_shape"] + dummy_tensor = torch.zeros((1, 1, *shape), dtype=torch.float32) + + # Create metadata with file path + meta_with_filepath = dict(x.meta) + meta_with_filepath["saved_file"] = saved_path + + return MetaTensor(dummy_tensor, meta=meta_with_filepath) diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py index ad52fb08..cb7d57b4 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py @@ -1,426 +1,428 @@ -# Copyright 2021-2025 MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -from pathlib import Path -from typing import Dict, List - -import torch -from numpy import int16, uint8 - -# Import custom transforms -from post_transforms import CalculateVolumeFromMaskd, ExtractVolumeToTextd, LabelToContourd, OverlayImageLabeld - -# Import from MONAI deploy -from monai.deploy.utils.importutil import optional_import - -Dataset, _ = optional_import("monai.data", name="Dataset") -DataLoader, _ = optional_import("monai.data", name="DataLoader") -import os - -# Try importing from local version first, then fall back to MONAI if not available -# This approach works regardless of how the file is imported (as module or script) -import sys - -# Add current directory to path to ensure the local module is found -current_dir = os.path.dirname(os.path.abspath(__file__)) -if current_dir not in sys.path: - sys.path.insert(0, current_dir) - -try: - # Try local version first - from nnunet_bundle import EnsembleProbabilitiesToSegmentation, get_nnunet_monai_predictors_for_ensemble -except ImportError: - # Fall back to MONAI version if local version fails - from monai.apps.nnunet.nnunet_bundle import ( - get_nnunet_monai_predictors_for_ensemble, - EnsembleProbabilitiesToSegmentation, - ) - -from monai.deploy.core import AppContext, Fragment, Model, Operator, OperatorSpec -from monai.deploy.operators.monai_seg_inference_operator import InMemImageReader - -# Import MONAI transforms -from monai.transforms import Compose, KeepLargestConnectedComponentd, Lambdad, LoadImaged, SaveImaged, Transposed - - -class NNUnetSegOperator(Operator): - """ - Operator that performs segmentation inference with nnU-Net ensemble models. - - This operator loads and runs multiple nnU-Net models in an ensemble fashion, - processes the results, and outputs segmentation masks, volume measurements, - and visualization overlays. - """ - - def __init__( - self, - fragment: Fragment, - *args, - app_context: AppContext, - model_path: Path, - output_folder: Path = Path.cwd() / "output", - output_labels: List[int] = None, - model_list: List[str] = None, - model_name: str = "best_model.pt", - save_probabilities: bool = False, - save_files: bool = False, - **kwargs, - ): - """ - Initialize the nnU-Net segmentation operator. 
- - Args: - fragment: The fragment this operator belongs to - app_context: The application context - model_path: Path to the nnU-Net model directory - output_folder: Directory to save output files - output_labels: List of label indices to include in outputs - model_list: List of nnU-Net model types to use in ensemble - model_name: Name of the model checkpoint file - save_probabilities: Whether to save probability maps - save_files: Whether to save intermediate files - """ - # Initialize logger - self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") - - # Set up data keys - self._input_dataset_key = "image" - self._pred_dataset_key = "pred" - - # Model configuration - self.model_path = self._find_model_file_path(model_path) - self.model_list = model_list or ["3d_fullres", "3d_lowres", "3d_cascade_fullres"] - self.model_name = model_name - self.save_probabilities = save_probabilities - self.save_files = save_files - self.prediction_keys = [f"pred_{model}" for model in self.model_list] - - # Output configuration - self.output_folder = output_folder - self.output_folder.mkdir(parents=True, exist_ok=True) - self.output_labels = output_labels if output_labels is not None else [1] - - # Store app context - self.app_context = app_context - - # I/O names for operator - self.input_name_image = "image" - self.output_name_seg = "seg_image" - self.output_name_text = "result_text" - self.output_name_sc_path = "dicom_sc_dir" - - # Call parent constructor - super().__init__(fragment, *args, **kwargs) - - def _find_model_file_path(self, model_path: Path) -> Path: - """ - Validates and returns the model directory path. - - Args: - model_path: Path to the model directory - - Returns: - Validated Path object to the model directory - - Raises: - ValueError: If model_path is invalid or doesn't exist - """ - # When executing as MAP, model_path is typically a directory (/opt/holoscan/models) - # nnU-Net expects a directory structure with model subdirectories - if not model_path: - raise ValueError("Model path not provided") - - if not model_path.is_dir(): - raise ValueError(f"Model path should be a directory, got: {model_path}") - - return model_path - - def _load_nnunet_models(self): - """ - Loads nnU-Net ensemble models using MONAI's nnU-Net bundle functionality - and registers them in the app_context. 
- - Raises: - RuntimeError: If model loading fails - """ - # Determine device based on availability - _device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self._logger.info(f"Loading nnU-Net ensemble models from: {self.model_path} on {_device}") - - try: - # Get nnU-Net ensemble predictors (returns tuple of ModelnnUNetWrapper objects) - network_def = get_nnunet_monai_predictors_for_ensemble( - model_list=self.model_list, model_path=str(self.model_path), model_name=self.model_name - ) - - # Move models to device and set to evaluation mode - ensemble_predictors = [] - for predictor in network_def: - predictor.to(_device) - predictor.eval() - ensemble_predictors.append(predictor) - - # Create a MONAI Model object to encapsulate the ensemble - loaded_model = Model(self.model_path, name="nnunet_ensemble") - loaded_model.predictor = ensemble_predictors - - # Register the loaded Model object in the application context - self.app_context.models = loaded_model - - self._logger.info(f"Successfully loaded {len(ensemble_predictors)} nnU-Net models: {self.model_list}") - - except Exception as e: - self._logger.error(f"Failed to load nnU-Net models: {str(e)}") - raise - - def setup(self, spec: OperatorSpec): - """ - Sets up the operator by configuring input and output specifications. - - Args: - spec: The operator specification to configure - """ - # Define input - expects a DICOM image - spec.input(self.input_name_image) - - # Define outputs: - # 1. Segmentation output (for DICOM SEG) - spec.output(self.output_name_seg) - - # 2. Measurement results text (for DICOM SR) - spec.output(self.output_name_text) - - # 3. Directory path for visualization overlays (for DICOM SC) - spec.output(self.output_name_sc_path) - - def _convert_dicom_metadata_datatype(self, metadata: Dict) -> Dict: - """ - Converts pydicom-specific metadata types to corresponding native Python types. - - This addresses an issue with pydicom types in metadata for images converted from DICOM series. - Reference issue: https://github.com/Project-MONAI/monai-deploy-app-sdk/issues/185 - - Args: - metadata: Dictionary containing image metadata - - Returns: - Dictionary with converted metadata types - """ - if not metadata: - return metadata - - # Convert known metadata attributes to appropriate Python types - known_conversions = {"SeriesInstanceUID": str, "row_pixel_spacing": float, "col_pixel_spacing": float} - - for key, conversion_func in known_conversions.items(): - if key in metadata: - try: - metadata[key] = conversion_func(metadata[key]) - except Exception: - self._logger.warning(f"Failed to convert {key} to {conversion_func.__name__}") - - # Log converted metadata at debug level - if self._logger.isEnabledFor(logging.DEBUG): - self._logger.debug("Converted Image object metadata:") - for k, v in metadata.items(): - self._logger.debug(f"{k}: {v}, type {type(v)}") - - return metadata - - def compute(self, op_input, op_output, context): - """ - Main compute method that processes input, runs inference, and emits outputs. 
- """ - # Get input image - input_image = op_input.receive(self.input_name_image) - if not input_image: - raise ValueError("Input image is not found.") - - # Load nnU-Net ensemble models - self._logger.info("Loading nnU-Net ensemble models") - self._load_nnunet_models() - - # Perform inference using our custom implementation - data_dict = self.compute_impl(input_image, context)[0] - - # Squeeze the batch dimension - data_dict[self._pred_dataset_key] = data_dict[self._pred_dataset_key].squeeze(0) - data_dict[self._input_dataset_key] = data_dict[self._input_dataset_key].squeeze(0) - - # Squeeze the batch dimension of affine meta data - data_dict[self._pred_dataset_key].affine = data_dict[self._pred_dataset_key].affine.squeeze(0) - data_dict[self._input_dataset_key].affine = data_dict[self._input_dataset_key].affine.squeeze(0) - - # Log shape information - self._logger.info(f"Segmentation prediction shape: {data_dict[self._pred_dataset_key].shape}") - self._logger.info(f"Segmentation image shape: {data_dict[self._input_dataset_key].shape}") - - # Get post transforms for MAP outputs - post_transforms = self.post_process_stage2() - - # Apply postprocessing transforms for MAP outputs - data_dict = post_transforms(data_dict) - - self._logger.info( - f"Segmentation prediction shape after post processing: {data_dict[self._pred_dataset_key].shape}" - ) - - # DICOM SEG output - op_output.emit(data_dict[self._pred_dataset_key].squeeze(0).numpy().astype(uint8), self.output_name_seg) - - # DICOM SR output - extract result text - result_text = self.get_result_text_from_transforms(post_transforms) - if not result_text: - raise ValueError("Result text could not be generated.") - - self._logger.info(f"Calculated Organ Volumes: {result_text}") - op_output.emit(result_text, self.output_name_text) - - # DICOM SC output - dicom_sc_dir = self.output_folder / "temp" - self._logger.info(f"Temporary DICOM SC saved at: {dicom_sc_dir}") - op_output.emit(dicom_sc_dir, self.output_name_sc_path) - - def pre_process(self, img_reader) -> Compose: - """Composes transforms for preprocessing the input image before predicting on nnU-Net models.""" - my_key = self._input_dataset_key - - return Compose( - [ - LoadImaged(keys=my_key, reader=img_reader, ensure_channel_first=True), - Transposed(keys=my_key, indices=[0, 3, 2, 1]), - ] - ) - - def compute_impl(self, input_image, context) -> List[Dict]: - """ - Performs the actual nnU-Net ensemble inference using ModelnnUNetWrapper. - This function handles the complete inference pipeline including preprocessing, - ensemble prediction, and postprocessing. - """ - - if not input_image: - raise ValueError("Input is None.") - - # Need to try to convert the data type of a few metadata attributes. - # input_img_metadata = self._convert_dicom_metadata_datatype(input_image.metadata()) - # Need to give a name to the image as in-mem Image obj has no name. - img_name = "Img_in_context" - - # This operator gets an in-memory Image object, so a specialized ImageReader is needed. 
- _reader = InMemImageReader(input_image) - - # Apply preprocessing transforms - pre_transforms = self.pre_process(_reader) - - # Create data dictionary - data_dict = {self._input_dataset_key: img_name} - - # Create dataset and dataloader - dataset = Dataset(data=[data_dict], transform=pre_transforms) - dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0) - - out_dict = [] - for d in dataloader: - preprocessed_image = d[self._input_dataset_key] - self._logger.info(f"Input shape: {preprocessed_image.shape}") - - # Get the loaded ensemble models from app context - if not hasattr(self.app_context, "models") or self.app_context.models is None: - raise RuntimeError("nnU-Net models not loaded. Call _load_nnunet_models first.") - - ensemble_predictors = self.app_context.models.predictor - - # Perform ensemble inference - self._logger.info("Running nnU-Net ensemble inference...") - - for i, predictor in enumerate(ensemble_predictors): - model_key = self.prediction_keys[i] - self._logger.info(f"Running inference with model: {model_key}") - - # Run inference with individual model - prediction = predictor(preprocessed_image) - d[model_key] = prediction - - self._logger.info("Inference complete, applying postprocessing...") - - # Apply postprocessing transforms (includes ensemble combination) - post_transforms1 = self.post_process_stage1() - d = post_transforms1(d) - out_dict.append(d) - return out_dict - - def post_process_stage1(self) -> Compose: - """Composes transforms for postprocessing the nnU-Net prediction results.""" - pred_key = self._pred_dataset_key - return Compose( - [ - # nnU-Net ensemble post-processing - EnsembleProbabilitiesToSegmentation( - keys=self.prediction_keys, - dataset_json_path=str(self.model_path / "jsonpkls/dataset.json"), - plans_json_path=str(self.model_path / "jsonpkls/plans.json"), - output_key=pred_key, - ), - # Add batch dimension to final prediction - Lambdad(keys=[pred_key], func=lambda x: x.unsqueeze(0)), - # Transpose dimensions back to original format - Transposed(keys=[self._input_dataset_key, pred_key], indices=(0, 1, 4, 3, 2)), - ] - ) - - def post_process_stage2(self) -> Compose: - """Composes transforms for postprocessing MAP outputs""" - pred_key = self._pred_dataset_key - - # Define labels for the segmentation output - labels = {"background": 0, "airway": 1} - - return Compose( - [ - # Keep only largest connected component for each label - KeepLargestConnectedComponentd(keys=pred_key, applied_labels=[1]), - # Calculate volume from segmentation mask - CalculateVolumeFromMaskd(keys=pred_key, label_names=labels), - # Extract volume data to text format - ExtractVolumeToTextd( - keys=[pred_key + "_volumes"], label_names=labels, output_labels=self.output_labels - ), - # Convert labels to contours - LabelToContourd(keys=pred_key, output_labels=self.output_labels), - # Create overlay of image and contours - OverlayImageLabeld(image_key=self._input_dataset_key, label_key=pred_key, overlay_key="overlay"), - # Save overlays as DICOM SC - SaveImaged( - keys="overlay", - output_ext=".dcm", - output_dir=self.output_folder / "temp", - separate_folder=False, - output_dtype=int16, - ), - ] - ) - - def get_result_text_from_transforms(self, post_transforms: Compose) -> str: - """ - Extracts result_text from the ExtractVolumeToTextd transform in the transform pipeline. 
- - Args: - post_transforms: Composed transforms that include ExtractVolumeToTextd - - Returns: - The extracted result text or empty string if not found - """ - for transform in post_transforms.transforms: - if isinstance(transform, ExtractVolumeToTextd): - return transform.result_text - return "" +# Copyright 2021-2025 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path +from typing import Dict, List + +import torch +from numpy import int16, uint8 + +# Import custom transforms +from post_transforms import CalculateVolumeFromMaskd, ExtractVolumeToTextd, LabelToContourd, OverlayImageLabeld + +# Import from MONAI deploy +from monai.deploy.utils.importutil import optional_import + +Dataset, _ = optional_import("monai.data", name="Dataset") +DataLoader, _ = optional_import("monai.data", name="DataLoader") +import os + +# Try importing from local version first, then fall back to MONAI if not available +# This approach works regardless of how the file is imported (as module or script) +import sys + +# Add current directory to path to ensure the local module is found +current_dir = os.path.dirname(os.path.abspath(__file__)) +if current_dir not in sys.path: + sys.path.insert(0, current_dir) + +try: + # Try local version first + from nnunet_bundle import EnsembleProbabilitiesToSegmentation, get_nnunet_monai_predictors_for_ensemble +except ImportError: + # Fall back to MONAI version if local version fails + from monai.apps.nnunet.nnunet_bundle import ( + get_nnunet_monai_predictors_for_ensemble, + EnsembleProbabilitiesToSegmentation, + ) + +from monai.deploy.core import AppContext, Fragment, Model, Operator, OperatorSpec +from monai.deploy.operators.monai_seg_inference_operator import InMemImageReader + +# Import MONAI transforms +from monai.transforms import Compose, KeepLargestConnectedComponentd, Lambdad, LoadImaged, SaveImaged, Transposed + +DEFAULT_OUTPUT_FOLDER = Path.cwd() / "output" + + +class NNUnetSegOperator(Operator): + """ + Operator that performs segmentation inference with nnU-Net ensemble models. + + This operator loads and runs multiple nnU-Net models in an ensemble fashion, + processes the results, and outputs segmentation masks, volume measurements, + and visualization overlays. + """ + + def __init__( + self, + fragment: Fragment, + *args, + app_context: AppContext, + model_path: Path, + output_folder: Path = DEFAULT_OUTPUT_FOLDER, + output_labels: List[int] = None, + model_list: List[str] = None, + model_name: str = "best_model.pt", + save_probabilities: bool = False, + save_files: bool = False, + **kwargs, + ): + """ + Initialize the nnU-Net segmentation operator. 
+ + Args: + fragment: The fragment this operator belongs to + app_context: The application context + model_path: Path to the nnU-Net model directory + output_folder: Directory to save output files + output_labels: List of label indices to include in outputs + model_list: List of nnU-Net model types to use in ensemble + model_name: Name of the model checkpoint file + save_probabilities: Whether to save probability maps + save_files: Whether to save intermediate files + """ + # Initialize logger + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + + # Set up data keys + self._input_dataset_key = "image" + self._pred_dataset_key = "pred" + + # Model configuration + self.model_path = self._find_model_file_path(model_path) + self.model_list = model_list or ["3d_fullres", "3d_lowres", "3d_cascade_fullres"] + self.model_name = model_name + self.save_probabilities = save_probabilities + self.save_files = save_files + self.prediction_keys = [f"pred_{model}" for model in self.model_list] + + # Output configuration + self.output_folder = output_folder if output_folder is not None else DEFAULT_OUTPUT_FOLDER + self.output_folder.mkdir(parents=True, exist_ok=True) + self.output_labels = output_labels if output_labels is not None else [1] + + # Store app context + self.app_context = app_context + + # I/O names for operator + self.input_name_image = "image" + self.output_name_seg = "seg_image" + self.output_name_text = "result_text" + self.output_name_sc_path = "dicom_sc_dir" + + # Call parent constructor + super().__init__(fragment, *args, **kwargs) + + def _find_model_file_path(self, model_path: Path) -> Path: + """ + Validates and returns the model directory path. + + Args: + model_path: Path to the model directory + + Returns: + Validated Path object to the model directory + + Raises: + ValueError: If model_path is invalid or doesn't exist + """ + # When executing as MAP, model_path is typically a directory (/opt/holoscan/models) + # nnU-Net expects a directory structure with model subdirectories + if not model_path: + raise ValueError("Model path not provided") + + if not model_path.is_dir(): + raise ValueError(f"Model path should be a directory, got: {model_path}") + + return model_path + + def _load_nnunet_models(self): + """ + Loads nnU-Net ensemble models using MONAI's nnU-Net bundle functionality + and registers them in the app_context. 
+ + Raises: + RuntimeError: If model loading fails + """ + # Determine device based on availability + _device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self._logger.info(f"Loading nnU-Net ensemble models from: {self.model_path} on {_device}") + + try: + # Get nnU-Net ensemble predictors (returns tuple of ModelnnUNetWrapper objects) + network_def = get_nnunet_monai_predictors_for_ensemble( + model_list=self.model_list, model_path=str(self.model_path), model_name=self.model_name + ) + + # Move models to device and set to evaluation mode + ensemble_predictors = [] + for predictor in network_def: + predictor.to(_device) + predictor.eval() + ensemble_predictors.append(predictor) + + # Create a MONAI Model object to encapsulate the ensemble + loaded_model = Model(self.model_path, name="nnunet_ensemble") + loaded_model.predictor = ensemble_predictors + + # Register the loaded Model object in the application context + self.app_context.models = loaded_model + + self._logger.info(f"Successfully loaded {len(ensemble_predictors)} nnU-Net models: {self.model_list}") + + except Exception as e: + self._logger.error(f"Failed to load nnU-Net models: {str(e)}") + raise + + def setup(self, spec: OperatorSpec): + """ + Sets up the operator by configuring input and output specifications. + + Args: + spec: The operator specification to configure + """ + # Define input - expects a DICOM image + spec.input(self.input_name_image) + + # Define outputs: + # 1. Segmentation output (for DICOM SEG) + spec.output(self.output_name_seg) + + # 2. Measurement results text (for DICOM SR) + spec.output(self.output_name_text) + + # 3. Directory path for visualization overlays (for DICOM SC) + spec.output(self.output_name_sc_path) + + def _convert_dicom_metadata_datatype(self, metadata: Dict) -> Dict: + """ + Converts pydicom-specific metadata types to corresponding native Python types. + + This addresses an issue with pydicom types in metadata for images converted from DICOM series. + Reference issue: https://github.com/Project-MONAI/monai-deploy-app-sdk/issues/185 + + Args: + metadata: Dictionary containing image metadata + + Returns: + Dictionary with converted metadata types + """ + if not metadata: + return metadata + + # Convert known metadata attributes to appropriate Python types + known_conversions = {"SeriesInstanceUID": str, "row_pixel_spacing": float, "col_pixel_spacing": float} + + for key, conversion_func in known_conversions.items(): + if key in metadata: + try: + metadata[key] = conversion_func(metadata[key]) + except Exception: + self._logger.warning(f"Failed to convert {key} to {conversion_func.__name__}") + + # Log converted metadata at debug level + if self._logger.isEnabledFor(logging.DEBUG): + self._logger.debug("Converted Image object metadata:") + for k, v in metadata.items(): + self._logger.debug(f"{k}: {v}, type {type(v)}") + + return metadata + + def compute(self, op_input, op_output, context): + """ + Main compute method that processes input, runs inference, and emits outputs. 
+ """ + # Get input image + input_image = op_input.receive(self.input_name_image) + if not input_image: + raise ValueError("Input image is not found.") + + # Load nnU-Net ensemble models + self._logger.info("Loading nnU-Net ensemble models") + self._load_nnunet_models() + + # Perform inference using our custom implementation + data_dict = self.compute_impl(input_image, context)[0] + + # Squeeze the batch dimension + data_dict[self._pred_dataset_key] = data_dict[self._pred_dataset_key].squeeze(0) + data_dict[self._input_dataset_key] = data_dict[self._input_dataset_key].squeeze(0) + + # Squeeze the batch dimension of affine meta data + data_dict[self._pred_dataset_key].affine = data_dict[self._pred_dataset_key].affine.squeeze(0) + data_dict[self._input_dataset_key].affine = data_dict[self._input_dataset_key].affine.squeeze(0) + + # Log shape information + self._logger.info(f"Segmentation prediction shape: {data_dict[self._pred_dataset_key].shape}") + self._logger.info(f"Segmentation image shape: {data_dict[self._input_dataset_key].shape}") + + # Get post transforms for MAP outputs + post_transforms = self.post_process_stage2() + + # Apply postprocessing transforms for MAP outputs + data_dict = post_transforms(data_dict) + + self._logger.info( + f"Segmentation prediction shape after post processing: {data_dict[self._pred_dataset_key].shape}" + ) + + # DICOM SEG output + op_output.emit(data_dict[self._pred_dataset_key].squeeze(0).numpy().astype(uint8), self.output_name_seg) + + # DICOM SR output - extract result text + result_text = self.get_result_text_from_transforms(post_transforms) + if not result_text: + raise ValueError("Result text could not be generated.") + + self._logger.info(f"Calculated Organ Volumes: {result_text}") + op_output.emit(result_text, self.output_name_text) + + # DICOM SC output + dicom_sc_dir = self.output_folder / "temp" + self._logger.info(f"Temporary DICOM SC saved at: {dicom_sc_dir}") + op_output.emit(dicom_sc_dir, self.output_name_sc_path) + + def pre_process(self, img_reader) -> Compose: + """Composes transforms for preprocessing the input image before predicting on nnU-Net models.""" + my_key = self._input_dataset_key + + return Compose( + [ + LoadImaged(keys=my_key, reader=img_reader, ensure_channel_first=True), + Transposed(keys=my_key, indices=[0, 3, 2, 1]), + ] + ) + + def compute_impl(self, input_image, context) -> List[Dict]: + """ + Performs the actual nnU-Net ensemble inference using ModelnnUNetWrapper. + This function handles the complete inference pipeline including preprocessing, + ensemble prediction, and postprocessing. + """ + + if not input_image: + raise ValueError("Input is None.") + + # Need to try to convert the data type of a few metadata attributes. + # input_img_metadata = self._convert_dicom_metadata_datatype(input_image.metadata()) + # Need to give a name to the image as in-mem Image obj has no name. + img_name = "Img_in_context" + + # This operator gets an in-memory Image object, so a specialized ImageReader is needed. 
+ _reader = InMemImageReader(input_image) + + # Apply preprocessing transforms + pre_transforms = self.pre_process(_reader) + + # Create data dictionary + data_dict = {self._input_dataset_key: img_name} + + # Create dataset and dataloader + dataset = Dataset(data=[data_dict], transform=pre_transforms) + dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0) + + out_dict = [] + for d in dataloader: + preprocessed_image = d[self._input_dataset_key] + self._logger.info(f"Input shape: {preprocessed_image.shape}") + + # Get the loaded ensemble models from app context + if not hasattr(self.app_context, "models") or self.app_context.models is None: + raise RuntimeError("nnU-Net models not loaded. Call _load_nnunet_models first.") + + ensemble_predictors = self.app_context.models.predictor + + # Perform ensemble inference + self._logger.info("Running nnU-Net ensemble inference...") + + for i, predictor in enumerate(ensemble_predictors): + model_key = self.prediction_keys[i] + self._logger.info(f"Running inference with model: {model_key}") + + # Run inference with individual model + prediction = predictor(preprocessed_image) + d[model_key] = prediction + + self._logger.info("Inference complete, applying postprocessing...") + + # Apply postprocessing transforms (includes ensemble combination) + post_transforms1 = self.post_process_stage1() + d = post_transforms1(d) + out_dict.append(d) + return out_dict + + def post_process_stage1(self) -> Compose: + """Composes transforms for postprocessing the nnU-Net prediction results.""" + pred_key = self._pred_dataset_key + return Compose( + [ + # nnU-Net ensemble post-processing + EnsembleProbabilitiesToSegmentation( + keys=self.prediction_keys, + dataset_json_path=str(self.model_path / "jsonpkls/dataset.json"), + plans_json_path=str(self.model_path / "jsonpkls/plans.json"), + output_key=pred_key, + ), + # Add batch dimension to final prediction + Lambdad(keys=[pred_key], func=lambda x: x.unsqueeze(0)), + # Transpose dimensions back to original format + Transposed(keys=[self._input_dataset_key, pred_key], indices=(0, 1, 4, 3, 2)), + ] + ) + + def post_process_stage2(self) -> Compose: + """Composes transforms for postprocessing MAP outputs""" + pred_key = self._pred_dataset_key + + # Define labels for the segmentation output + labels = {"background": 0, "airway": 1} + + return Compose( + [ + # Keep only largest connected component for each label + KeepLargestConnectedComponentd(keys=pred_key, applied_labels=[1]), + # Calculate volume from segmentation mask + CalculateVolumeFromMaskd(keys=pred_key, label_names=labels), + # Extract volume data to text format + ExtractVolumeToTextd( + keys=[pred_key + "_volumes"], label_names=labels, output_labels=self.output_labels + ), + # Convert labels to contours + LabelToContourd(keys=pred_key, output_labels=self.output_labels), + # Create overlay of image and contours + OverlayImageLabeld(image_key=self._input_dataset_key, label_key=pred_key, overlay_key="overlay"), + # Save overlays as DICOM SC + SaveImaged( + keys="overlay", + output_ext=".dcm", + output_dir=self.output_folder / "temp", + separate_folder=False, + output_dtype=int16, + ), + ] + ) + + def get_result_text_from_transforms(self, post_transforms: Compose) -> str: + """ + Extracts result_text from the ExtractVolumeToTextd transform in the transform pipeline. 
+ + Args: + post_transforms: Composed transforms that include ExtractVolumeToTextd + + Returns: + The extracted result text or empty string if not found + """ + for transform in post_transforms.transforms: + if isinstance(transform, ExtractVolumeToTextd): + return transform.result_text + return "" diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/post_transforms.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/post_transforms.py index 813a508b..a53de889 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/post_transforms.py +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/post_transforms.py @@ -1,390 +1,390 @@ -# Copyright 2021-2025 MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import logging -import os -from typing import List, Optional - -import matplotlib.cm as cm -import numpy as np - -from monai.config import KeysCollection -from monai.data import MetaTensor -from monai.transforms import LabelToContour, MapTransform - - -# Calculate segmentation volumes in ml -class CalculateVolumeFromMaskd(MapTransform): - """ - Dictionary-based transform to calculate the volume of predicted organ masks. - - Args: - keys (list): The keys corresponding to the predicted organ masks in the dictionary. - label_names (list): The list of organ names corresponding to the masks. 
- """ - - def __init__(self, keys, label_names): - self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") - super().__init__(keys) - self.label_names = label_names - - def __call__(self, data): - # Initialize a dictionary to store the volumes of each organ - pred_volumes = {} - - for key in self.keys: - for label_name in self.label_names.keys(): - # self._logger.info('Key: ', key, ' organ_name: ', label_name) - if label_name != "background": - # Get the predicted mask from the dictionary - pred_mask = data[key] - # Calculate the voxel size in cubic millimeters (voxel size should be in the metadata) - # Assuming the metadata contains 'spatial_shape' with voxel dimensions in mm - if hasattr(pred_mask, "affine"): - # voxel size - # Ensure the affine matrix is collapsed to shape (4, 4) - affine_matrix = np.squeeze(pred_mask.affine) - if affine_matrix.shape != (4, 4): - raise ValueError(f"Affine matrix must have shape (4, 4), but got {affine_matrix.shape}") - - # Calculate voxel size - voxel_size = np.abs(np.linalg.det(affine_matrix[:3, :3])) - # print(f"Voxel Size (mm³): {voxel_size}") - else: - raise ValueError("Affine transformation matrix with voxel spacing information is required.") - - # Calculate the volume in cubic millimeters - label_volume_mm3 = np.sum(pred_mask == self.label_names[label_name]) * voxel_size - - # Convert to milliliters (1 ml = 1000 mm^3) - label_volume_ml = label_volume_mm3 / 1000.0 - - # Store the result in the pred_volumes dictionary - # convert to int - radiologists prefer whole number with no decimals - pred_volumes[label_name] = int(round(label_volume_ml, 2)) - - # Add the calculated volumes to the data dictionary - key_name = key + "_volumes" - - data[key_name] = pred_volumes - # self._logger.info('pred_volumes: ', pred_volumes) - return data - - -class LabelToContourd(MapTransform): - def __init__(self, keys: KeysCollection, output_labels: list, allow_missing_keys: bool = False): - - self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") - super().__init__(keys, allow_missing_keys) - - self.output_labels = output_labels - - def __call__(self, data): - d = dict(data) - for key in self.keys: - label_image = d[key] - assert isinstance(label_image, MetaTensor), "Input image must be a MetaTensor." 
- - # Initialize the contour image with the same shape as the label image - contour_image = np.zeros_like(label_image.cpu().numpy()) - - if label_image.ndim == 4: # Check if the label image is 4D with a channel dimension - # Process each 2D slice independently along the last axis (z-axis) - for i in range(label_image.shape[-1]): - slice_image = label_image[:, :, :, i].cpu().numpy() - - # Extract unique labels excluding background (assumed to be 0) - unique_labels = np.unique(slice_image) - unique_labels = unique_labels[unique_labels != 0] - - slice_contour = np.zeros_like(slice_image) - - # Generate contours for each label in the slice - for label in unique_labels: - # skip contour generation for labels that are not in output_labels - if label not in self.output_labels: - continue - - # Create a binary mask for the current label - binary_mask = np.zeros_like(slice_image) - binary_mask[slice_image == label] = 1.0 - - # Apply LabelToContour to the 2D slice (replace this with actual contour logic) - binary_mask = binary_mask.astype(np.float32) # Convert to float32 for LabelToContour - thick_edges = LabelToContour()(binary_mask) - - # Convert the edges back to binary mask - thick_edges = (thick_edges > 0).astype(np.uint8) - - # Assign the label value to the contour image at the edge positions - slice_contour[thick_edges > 0] = label - - # Stack the processed slice back into the 4D contour image - contour_image[:, :, :, i] = slice_contour - else: - # If the label image is not 4D, process it directly - slice_image = label_image.cpu().numpy() - unique_labels = np.unique(slice_image) - unique_labels = unique_labels[unique_labels != 0] - - for label in unique_labels: - binary_mask = np.zeros_like(slice_image) - binary_mask[slice_image == label] = 1.0 - - thick_edges = LabelToContour()(binary_mask) - contour_image[thick_edges > 0] = label - - # Convert the contour image back to a MetaTensor with the original metadata - contour_image_meta = MetaTensor(contour_image, meta=label_image.meta) # , affine=label_image.affine) - - # Store the contour MetaTensor in the output dictionary - d[key] = contour_image_meta - - return d - - -class OverlayImageLabeld(MapTransform): - def __init__( - self, - image_key: KeysCollection, - label_key: str, - overlay_key: str = "overlay", - alpha: float = 0.7, - allow_missing_keys: bool = False, - ): - - self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") - super().__init__(image_key, allow_missing_keys) - - self.image_key = image_key - self.label_key = label_key - self.overlay_key = overlay_key - self.alpha = alpha - self.jet_colormap = cm.get_cmap("jet", 256) # Get the Jet colormap with 256 discrete colors - - def apply_jet_colormap(self, label_volume): - """ - Apply the Jet colormap to a 3D label volume using matplotlib's colormap. - """ - assert label_volume.ndim == 3, "Label volume should have 3 dimensions (H, W, D) after removing channel." 
- - label_volume_normalized = (label_volume / label_volume.max()) * 255.0 - label_volume_uint8 = label_volume_normalized.astype(np.uint8) - - # Apply the colormap to each label - label_rgb = self.jet_colormap(label_volume_uint8)[:, :, :, :3] # Only take the RGB channels - - label_rgb = (label_rgb * 255).astype(np.uint8) - # Rearrange axes to get (3, H, W, D) - label_rgb = np.transpose(label_rgb, (3, 0, 1, 2)) - - assert label_rgb.shape == ( - 3, - *label_volume.shape, - ), f"Label RGB shape should be (3,H, W, D) but got {label_rgb.shape}" - - return label_rgb - - def convert_to_rgb(self, image_volume): - """ - Convert a single-channel grayscale 3D image to an RGB 3D image. - """ - assert image_volume.ndim == 3, "Image volume should have 3 dimensions (H, W, D) after removing channel." - - image_volume_normalized = (image_volume - image_volume.min()) / (image_volume.max() - image_volume.min()) - image_rgb = np.stack([image_volume_normalized] * 3, axis=0) - image_rgb = (image_rgb * 255).astype(np.uint8) - - assert image_rgb.shape == ( - 3, - *image_volume.shape, - ), f"Image RGB shape should be (3,H, W, D) but got {image_rgb.shape}" - - return image_rgb - - def _create_overlay(self, image_volume, label_volume): - # Convert the image volume and label volume to RGB - image_rgb = self.convert_to_rgb(image_volume) - label_rgb = self.apply_jet_colormap(label_volume) - - # Create an alpha-blended overlay - overlay = image_rgb.copy() - mask = label_volume > 0 - - # Apply the overlay where the mask is present - for i in range(3): # For each color channel - overlay[i, mask] = (self.alpha * label_rgb[i, mask] + (1 - self.alpha) * overlay[i, mask]).astype(np.uint8) - - assert ( - overlay.shape == image_rgb.shape - ), f"Overlay shape should match image RGB shape: {overlay.shape} vs {image_rgb.shape}" - - return overlay - - def __call__(self, data): - d = dict(data) - - # Get the image and label tensors - image = d[self.image_key] # Expecting shape (1, H, W, D) - label = d[self.label_key] # Expecting shape (1, H, W, D) - - # # uncomment when running pipeline with mask (non-contour) outputs, i.e. LabelToContourd transform absent - # if image.device.type == "cuda": - # image = image.cpu() - # d[self.image_key] = image - # if label.device.type == "cuda": - # label = label.cpu() - # d[self.label_key] = label - # # ----------------------- - - # Ensure that the input has the correct dimensions - assert image.shape[0] == 1 and label.shape[0] == 1, "Image and label must have a channel dimension of 1." 
- assert image.shape == label.shape, f"Image and label must have the same shape: {image.shape} vs {label.shape}" - - # Remove the channel dimension for processing - image_volume = image[0] # Shape: (H, W, D) - label_volume = label[0] # Shape: (H, W, D) - - # Convert to 3D overlay - overlay = self._create_overlay(image_volume, label_volume) - - # Add the channel dimension back - # d[self.overlay_key] = np.expand_dims(overlay, axis=0) # Shape: (1, H, W, D, 3) - d[self.overlay_key] = MetaTensor(overlay, meta=label.meta, affine=label.affine) # Shape: (3, H, W, D) - - # Assert the final output shape - # assert d[self.overlay_key].shape == (1, *image_volume.shape, 3), \ - # f"Final overlay shape should be (1, H, W, D, 3) but got {d[self.overlay_key].shape}" - - assert d[self.overlay_key].shape == ( - 3, - *image_volume.shape, - ), f"Final overlay shape should be (3, H, W, D) but got {d[self.overlay_key].shape}" - - # Log the overlay creation (debugging) - self._logger.info(f"Overlay created with shape: {overlay.shape}") - # self._logger.info(f"Dictionary keys: {d.keys()}") - - # self._logger.info('overlay_image shape: ', d[self.overlay_key].shape) - return d - - -class SaveData(MapTransform): - """ - Save the output dictionary into JSON files. - - The name of the saved file will be `{key}_{output_postfix}.json`. - - Args: - keys: keys of the corresponding items to be saved in the dictionary. - output_dir: directory to save the output files. - output_postfix: a string appended to all output file names, default is `data`. - separate_folder: whether to save each file in a separate folder. Default is `True`. - print_log: whether to print logs when saving. Default is `True`. - """ - - def __init__( - self, - keys: KeysCollection, - namekey: str = "image", - output_dir: str = "./", - output_postfix: str = "data", - separate_folder: bool = False, - print_log: bool = True, - allow_missing_keys: bool = False, - ): - self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") - super().__init__(keys, allow_missing_keys) - self.output_dir = output_dir - self.output_postfix = output_postfix - self.separate_folder = separate_folder - self.print_log = print_log - self.namekey = namekey - - def __call__(self, data): - d = dict(data) - image_name = os.path.basename(d[self.namekey].meta["filename_or_obj"]).split(".")[0] - for key in self.keys: - # Get the data - output_data = d[key] - - # Determine the file name - file_name = f"{image_name}_{self.output_postfix}.json" - if self.separate_folder: - file_path = os.path.join(self.output_dir, image_name, file_name) - os.makedirs(os.path.dirname(file_path), exist_ok=True) - else: - file_path = os.path.join(self.output_dir, file_name) - - # Save the dictionary as a JSON file - with open(file_path, "w") as f: - json.dump(output_data, f) - - if self.print_log: - self._logger.info(f"Saved data to {file_path}") - - return d - - -# custom transform (not in original post_transforms.py in bundle): -class ExtractVolumeToTextd(MapTransform): - """ - Custom transform to extract volume information from the segmentation results and format it as a textual summary. - Filters organ volumes based on output_labels for DICOM SR write, while including all organs for MongoDB write. - The upstream CalculateVolumeFromMaskd transform calculates organ volumes and stores them in the dictionary - under the pred_key + '_volumes' key. The input dictionary is outputted unchanged as to not affect downstream operators. 
-
-    Args:
-        keys: keys of the corresponding items to be saved in the dictionary.
-        label_names: dictionary mapping organ names to their corresponding label indices.
-        output_labels: list of target label indices for organs to include in the DICOM SR output.
-    """
-
-    def __init__(
-        self,
-        keys: KeysCollection,
-        label_names: dict,
-        output_labels: List[int],
-        allow_missing_keys: bool = False,
-    ):
-        self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}")
-        super().__init__(keys, allow_missing_keys)
-
-        self.label_names = label_names
-        self.output_labels = output_labels
-
-        self.result_text: Optional[str] = None
-
-    def __call__(self, data):
-        d = dict(data)
-        # use the first key in `keys` to access the volume data (e.g., pred_key + '_volumes')
-        volumes_key = self.keys[0]
-        organ_volumes = d.get(volumes_key, None)
-
-        if organ_volumes is None:
-            raise ValueError(f"Volume data not found for key {volumes_key}.")
-
-        # create the volume text output
-        volume_text = []
-
-        # loop through calculated organ volumes
-        for organ, volume in organ_volumes.items():
-            # if the organ's label index is in output_labels
-            label_index = self.label_names.get(organ, None)
-            if label_index in self.output_labels:
-                # append organ volume for DICOM SR entry
-                volume_text.append(f"{organ.capitalize()} Volume: {volume} mL")
-
-        self.result_text = "\n".join(volume_text)
-
-        # not adding result_text to dictionary; return dictionary unchanged as to not affect downstream operators
-        return d
+# Copyright 2021-2025 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+import os
+from typing import List, Optional
+
+import matplotlib.cm as cm
+import numpy as np
+
+from monai.config import KeysCollection
+from monai.data import MetaTensor
+from monai.transforms import LabelToContour, MapTransform
+
+
+# Calculate segmentation volumes in ml
+class CalculateVolumeFromMaskd(MapTransform):
+    """
+    Dictionary-based transform to calculate the volume of predicted organ masks.
+
+    Args:
+        keys (list): The keys corresponding to the predicted organ masks in the dictionary.
+        label_names (dict): Dictionary mapping organ names to their corresponding label indices.
+ """ + + def __init__(self, keys, label_names): + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(keys) + self.label_names = label_names + + def __call__(self, data): + # Initialize a dictionary to store the volumes of each organ + pred_volumes = {} + + for key in self.keys: + for label_name in self.label_names.keys(): + # self._logger.info('Key: ', key, ' organ_name: ', label_name) + if label_name != "background": + # Get the predicted mask from the dictionary + pred_mask = data[key] + # Calculate the voxel size in cubic millimeters (voxel size should be in the metadata) + # Assuming the metadata contains 'spatial_shape' with voxel dimensions in mm + if hasattr(pred_mask, "affine"): + # voxel size + # Ensure the affine matrix is collapsed to shape (4, 4) + affine_matrix = np.squeeze(pred_mask.affine) + if affine_matrix.shape != (4, 4): + raise ValueError(f"Affine matrix must have shape (4, 4), but got {affine_matrix.shape}") + + # Calculate voxel size + voxel_size = np.abs(np.linalg.det(affine_matrix[:3, :3])) + # print(f"Voxel Size (mm³): {voxel_size}") + else: + raise ValueError("Affine transformation matrix with voxel spacing information is required.") + + # Calculate the volume in cubic millimeters + label_volume_mm3 = np.sum(pred_mask == self.label_names[label_name]) * voxel_size + + # Convert to milliliters (1 ml = 1000 mm^3) + label_volume_ml = label_volume_mm3 / 1000.0 + + # Store the result in the pred_volumes dictionary + # convert to int - radiologists prefer whole number with no decimals + pred_volumes[label_name] = int(round(label_volume_ml, 2)) + + # Add the calculated volumes to the data dictionary + key_name = key + "_volumes" + + data[key_name] = pred_volumes + # self._logger.info('pred_volumes: ', pred_volumes) + return data + + +class LabelToContourd(MapTransform): + def __init__(self, keys: KeysCollection, output_labels: list, allow_missing_keys: bool = False): + + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(keys, allow_missing_keys) + + self.output_labels = output_labels + + def __call__(self, data): + d = dict(data) + for key in self.keys: + label_image = d[key] + assert isinstance(label_image, MetaTensor), "Input image must be a MetaTensor." 
+ + # Initialize the contour image with the same shape as the label image + contour_image = np.zeros_like(label_image.cpu().numpy()) + + if label_image.ndim == 4: # Check if the label image is 4D with a channel dimension + # Process each 2D slice independently along the last axis (z-axis) + for i in range(label_image.shape[-1]): + slice_image = label_image[:, :, :, i].cpu().numpy() + + # Extract unique labels excluding background (assumed to be 0) + unique_labels = np.unique(slice_image) + unique_labels = unique_labels[unique_labels != 0] + + slice_contour = np.zeros_like(slice_image) + + # Generate contours for each label in the slice + for label in unique_labels: + # skip contour generation for labels that are not in output_labels + if label not in self.output_labels: + continue + + # Create a binary mask for the current label + binary_mask = np.zeros_like(slice_image) + binary_mask[slice_image == label] = 1.0 + + # Apply LabelToContour to the 2D slice (replace this with actual contour logic) + binary_mask = binary_mask.astype(np.float32) # Convert to float32 for LabelToContour + thick_edges = LabelToContour()(binary_mask) + + # Convert the edges back to binary mask + thick_edges = (thick_edges > 0).astype(np.uint8) + + # Assign the label value to the contour image at the edge positions + slice_contour[thick_edges > 0] = label + + # Stack the processed slice back into the 4D contour image + contour_image[:, :, :, i] = slice_contour + else: + # If the label image is not 4D, process it directly + slice_image = label_image.cpu().numpy() + unique_labels = np.unique(slice_image) + unique_labels = unique_labels[unique_labels != 0] + + for label in unique_labels: + binary_mask = np.zeros_like(slice_image) + binary_mask[slice_image == label] = 1.0 + + thick_edges = LabelToContour()(binary_mask) + contour_image[thick_edges > 0] = label + + # Convert the contour image back to a MetaTensor with the original metadata + contour_image_meta = MetaTensor(contour_image, meta=label_image.meta) # , affine=label_image.affine) + + # Store the contour MetaTensor in the output dictionary + d[key] = contour_image_meta + + return d + + +class OverlayImageLabeld(MapTransform): + def __init__( + self, + image_key: KeysCollection, + label_key: str, + overlay_key: str = "overlay", + alpha: float = 0.7, + allow_missing_keys: bool = False, + ): + + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(image_key, allow_missing_keys) + + self.image_key = image_key + self.label_key = label_key + self.overlay_key = overlay_key + self.alpha = alpha + self.jet_colormap = cm.get_cmap("jet", 256) # Get the Jet colormap with 256 discrete colors + + def apply_jet_colormap(self, label_volume): + """ + Apply the Jet colormap to a 3D label volume using matplotlib's colormap. + """ + assert label_volume.ndim == 3, "Label volume should have 3 dimensions (H, W, D) after removing channel." 
+ + label_volume_normalized = (label_volume / label_volume.max()) * 255.0 + label_volume_uint8 = label_volume_normalized.astype(np.uint8) + + # Apply the colormap to each label + label_rgb = self.jet_colormap(label_volume_uint8)[:, :, :, :3] # Only take the RGB channels + + label_rgb = (label_rgb * 255).astype(np.uint8) + # Rearrange axes to get (3, H, W, D) + label_rgb = np.transpose(label_rgb, (3, 0, 1, 2)) + + assert label_rgb.shape == ( + 3, + *label_volume.shape, + ), f"Label RGB shape should be (3,H, W, D) but got {label_rgb.shape}" + + return label_rgb + + def convert_to_rgb(self, image_volume): + """ + Convert a single-channel grayscale 3D image to an RGB 3D image. + """ + assert image_volume.ndim == 3, "Image volume should have 3 dimensions (H, W, D) after removing channel." + + image_volume_normalized = (image_volume - image_volume.min()) / (image_volume.max() - image_volume.min()) + image_rgb = np.stack([image_volume_normalized] * 3, axis=0) + image_rgb = (image_rgb * 255).astype(np.uint8) + + assert image_rgb.shape == ( + 3, + *image_volume.shape, + ), f"Image RGB shape should be (3,H, W, D) but got {image_rgb.shape}" + + return image_rgb + + def _create_overlay(self, image_volume, label_volume): + # Convert the image volume and label volume to RGB + image_rgb = self.convert_to_rgb(image_volume) + label_rgb = self.apply_jet_colormap(label_volume) + + # Create an alpha-blended overlay + overlay = image_rgb.copy() + mask = label_volume > 0 + + # Apply the overlay where the mask is present + for i in range(3): # For each color channel + overlay[i, mask] = (self.alpha * label_rgb[i, mask] + (1 - self.alpha) * overlay[i, mask]).astype(np.uint8) + + assert ( + overlay.shape == image_rgb.shape + ), f"Overlay shape should match image RGB shape: {overlay.shape} vs {image_rgb.shape}" + + return overlay + + def __call__(self, data): + d = dict(data) + + # Get the image and label tensors + image = d[self.image_key] # Expecting shape (1, H, W, D) + label = d[self.label_key] # Expecting shape (1, H, W, D) + + # # uncomment when running pipeline with mask (non-contour) outputs, i.e. LabelToContourd transform absent + # if image.device.type == "cuda": + # image = image.cpu() + # d[self.image_key] = image + # if label.device.type == "cuda": + # label = label.cpu() + # d[self.label_key] = label + # # ----------------------- + + # Ensure that the input has the correct dimensions + assert image.shape[0] == 1 and label.shape[0] == 1, "Image and label must have a channel dimension of 1." 
+        assert image.shape == label.shape, f"Image and label must have the same shape: {image.shape} vs {label.shape}"
+
+        # Remove the channel dimension for processing
+        image_volume = image[0]  # Shape: (H, W, D)
+        label_volume = label[0]  # Shape: (H, W, D)
+
+        # Convert to 3D overlay
+        overlay = self._create_overlay(image_volume, label_volume)
+
+        # Add the channel dimension back
+        # d[self.overlay_key] = np.expand_dims(overlay, axis=0)  # Shape: (1, H, W, D, 3)
+        d[self.overlay_key] = MetaTensor(overlay, meta=label.meta, affine=label.affine)  # Shape: (3, H, W, D)
+
+        # Assert the final output shape
+        # assert d[self.overlay_key].shape == (1, *image_volume.shape, 3), \
+        #     f"Final overlay shape should be (1, H, W, D, 3) but got {d[self.overlay_key].shape}"
+
+        assert d[self.overlay_key].shape == (
+            3,
+            *image_volume.shape,
+        ), f"Final overlay shape should be (3, H, W, D) but got {d[self.overlay_key].shape}"
+
+        # Log the overlay creation (debugging)
+        self._logger.info(f"Overlay created with shape: {overlay.shape}")
+        # self._logger.info(f"Dictionary keys: {d.keys()}")
+
+        # self._logger.info('overlay_image shape: ', d[self.overlay_key].shape)
+        return d
+
+
+class SaveData(MapTransform):
+    """
+    Save the output dictionary into JSON files.
+
+    The name of the saved file will be `{image_name}_{output_postfix}.json`, where `image_name`
+    is derived from the file name in the metadata of the item under `namekey`.
+
+    Args:
+        keys: keys of the corresponding items to be saved in the dictionary.
+        namekey: key of the item whose metadata provides the base output file name.
+        output_dir: directory to save the output files.
+        output_postfix: a string appended to all output file names, default is `data`.
+        separate_folder: whether to save each file in a separate folder. Default is `False`.
+        print_log: whether to print logs when saving. Default is `True`.
+    """
+
+    def __init__(
+        self,
+        keys: KeysCollection,
+        namekey: str = "image",
+        output_dir: str = "./",
+        output_postfix: str = "data",
+        separate_folder: bool = False,
+        print_log: bool = True,
+        allow_missing_keys: bool = False,
+    ):
+        self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}")
+        super().__init__(keys, allow_missing_keys)
+        self.output_dir = output_dir
+        self.output_postfix = output_postfix
+        self.separate_folder = separate_folder
+        self.print_log = print_log
+        self.namekey = namekey
+
+    def __call__(self, data):
+        d = dict(data)
+        image_name = os.path.basename(d[self.namekey].meta["filename_or_obj"]).split(".")[0]
+        for key in self.keys:
+            # Get the data
+            output_data = d[key]
+
+            # Determine the file name
+            file_name = f"{image_name}_{self.output_postfix}.json"
+            if self.separate_folder:
+                file_path = os.path.join(self.output_dir, image_name, file_name)
+                os.makedirs(os.path.dirname(file_path), exist_ok=True)
+            else:
+                file_path = os.path.join(self.output_dir, file_name)
+
+            # Save the dictionary as a JSON file
+            with open(file_path, "w") as f:
+                json.dump(output_data, f)
+
+            if self.print_log:
+                self._logger.info(f"Saved data to {file_path}")
+
+        return d
+
+
+# custom transform (not in original post_transforms.py in bundle):
+class ExtractVolumeToTextd(MapTransform):
+    """
+    Custom transform to extract volume information from the segmentation results and format it as a textual summary.
+    Filters organ volumes based on output_labels for DICOM SR write, while including all organs for MongoDB write.
+    The upstream CalculateVolumeFromMaskd transform calculates organ volumes and stores them in the dictionary
+    under the pred_key + '_volumes' key. The input dictionary is returned unchanged so as not to affect downstream operators.
+ + Args: + keys: keys of the corresponding items to be saved in the dictionary. + label_names: dictionary mapping organ names to their corresponding label indices. + output_labels: list of target label indices for organs to include in the DICOM SR output. + """ + + def __init__( + self, + keys: KeysCollection, + label_names: dict, + output_labels: List[int], + allow_missing_keys: bool = False, + ): + self._logger = logging.getLogger(f"{__name__}.{type(self).__name__}") + super().__init__(keys, allow_missing_keys) + + self.label_names = label_names + self.output_labels = output_labels + + self.result_text: Optional[str] = None + + def __call__(self, data): + d = dict(data) + # use the first key in `keys` to access the volume data (e.g., pred_key + '_volumes') + volumes_key = self.keys[0] + organ_volumes = d.get(volumes_key, None) + + if organ_volumes is None: + raise ValueError(f"Volume data not found for key {volumes_key}.") + + # create the volume text output + volume_text = [] + + # loop through calculated organ volumes + for organ, volume in organ_volumes.items(): + # if the organ's label index is in output_labels + label_index = self.label_names.get(organ, None) + if label_index in self.output_labels: + # append organ volume for DICOM SR entry + volume_text.append(f"{organ.capitalize()} Volume: {volume} mL") + + self.result_text = "\n".join(volume_text) + + # not adding result_text to dictionary; return dictionary unchanged as to not affect downstream operators + return d diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt index fae3bb47..d3035e41 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt @@ -1,37 +1,37 @@ -# requirements.txt file specifies dependencies our Python project needs to run - -# install MONAI and necessary image processing packages (base list pulled from MONAI Bundle Spleen Seg App example) -# based on CCHMC Ped Abd MRI MONAI Bundle dependencies: -# monai, numpy, nibabel versions upgraded -# pytorch-ignite and fire dependencies added -# python 3.9 is required to install specified pytorch and monai-deploy-app-sdk versions -# einops optional dependency needed for DAE model workflow -monai[einops]==1.3.0 -torch>=1.12.0 -pytorch-ignite==0.4.11 -fire==0.4.0 -numpy>=1.24,<2.0 -nibabel==4.0.1 -# pydicom v3.0.0 removed pydicom._storage_sopclass_uids; don't meet or exceed this version -pydicom>=2.3.0,<3.0.0 -# pylibjpeg for processing compressed DICOM pixel data -pylibjpeg[all] -highdicom>=0.18.2 -itk>=5.3.0 -SimpleITK>=2.0.0 -scikit-image>=0.17.2 -Pillow>=8.0.0 -numpy-stl>=2.12.0 -trimesh>=3.8.11 -matplotlib>=3.7.2 -setuptools>=75.8.0 # for pkg_resources - -# MONAI Deploy App SDK package installation -# includes Holoscan SDK and CLI ~=3.0 -monai-deploy-app-sdk==3.0.0 - -# fine control over holoscan and holoscan-cli versions -holoscan==3.2.0 -holoscan-cli==3.2.0 -nvflare>=2.6.2,<3.0.0 +# requirements.txt file specifies dependencies our Python project needs to run + +# install MONAI and necessary image processing packages (base list pulled from MONAI Bundle Spleen Seg App example) +# based on CCHMC Ped Abd MRI MONAI Bundle dependencies: +# monai, numpy, nibabel versions upgraded +# pytorch-ignite and fire dependencies added +# python 3.9 is required to install specified pytorch and monai-deploy-app-sdk versions +# einops optional dependency needed for DAE model workflow 
diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt
index fae3bb47..d3035e41 100644
--- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/requirements.txt
@@ -1,37 +1,37 @@
-# requirements.txt file specifies dependencies our Python project needs to run
-
-# install MONAI and necessary image processing packages (base list pulled from MONAI Bundle Spleen Seg App example)
-# based on CCHMC Ped Abd MRI MONAI Bundle dependencies:
-# monai, numpy, nibabel versions upgraded
-# pytorch-ignite and fire dependencies added
-# python 3.9 is required to install specified pytorch and monai-deploy-app-sdk versions
-# einops optional dependency needed for DAE model workflow
-monai[einops]==1.3.0
-torch>=1.12.0
-pytorch-ignite==0.4.11
-fire==0.4.0
-numpy>=1.24,<2.0
-nibabel==4.0.1
-# pydicom v3.0.0 removed pydicom._storage_sopclass_uids; don't meet or exceed this version
-pydicom>=2.3.0,<3.0.0
-# pylibjpeg for processing compressed DICOM pixel data
-pylibjpeg[all]
-highdicom>=0.18.2
-itk>=5.3.0
-SimpleITK>=2.0.0
-scikit-image>=0.17.2
-Pillow>=8.0.0
-numpy-stl>=2.12.0
-trimesh>=3.8.11
-matplotlib>=3.7.2
-setuptools>=75.8.0 # for pkg_resources
-
-# MONAI Deploy App SDK package installation
-# includes Holoscan SDK and CLI ~=3.0
-monai-deploy-app-sdk==3.0.0
-
-# fine control over holoscan and holoscan-cli versions
-holoscan==3.2.0
-holoscan-cli==3.2.0
-nvflare>=2.6.2,<3.0.0
+# requirements.txt file specifies dependencies our Python project needs to run
+
+# install MONAI and necessary image processing packages (base list pulled from MONAI Bundle Spleen Seg App example)
+# based on CCHMC Ped Abd MRI MONAI Bundle dependencies:
+# monai, numpy, nibabel versions upgraded
+# pytorch-ignite and fire dependencies added
+# python 3.9 is required to install specified pytorch and monai-deploy-app-sdk versions
+# einops optional dependency needed for DAE model workflow
+monai[einops]==1.3.0
+torch>=1.12.0
+pytorch-ignite==0.4.11
+fire==0.4.0
+numpy>=1.24,<2.0
+nibabel==4.0.1
+# pydicom v3.0.0 removed pydicom._storage_sopclass_uids; don't meet or exceed this version
+pydicom>=2.3.0,<3.0.0
+# pylibjpeg for processing compressed DICOM pixel data
+pylibjpeg[all]
+highdicom>=0.18.2
+itk>=5.3.0
+SimpleITK>=2.0.0
+scikit-image>=0.17.2
+Pillow>=8.0.0
+numpy-stl>=2.12.0
+trimesh>=3.8.11
+matplotlib>=3.7.2
+setuptools>=75.8.0 # for pkg_resources
+
+# MONAI Deploy App SDK package installation
+# includes Holoscan SDK and CLI ~=3.0
+monai-deploy-app-sdk==3.0.0
+
+# fine control over holoscan and holoscan-cli versions
+holoscan==3.2.0
+holoscan-cli==3.2.0
+nvflare>=2.6.2,<3.0.0
+nnunetv2>=2.6.2,<3.0.0
\ No newline at end of file
diff --git a/examples/apps/convert_nnunet_ckpts.py b/examples/apps/convert_nnunet_ckpts.py
new file mode 100644
index 00000000..e1691e89
--- /dev/null
+++ b/examples/apps/convert_nnunet_ckpts.py
@@ -0,0 +1,103 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Convert nnUNet checkpoints to MONAI bundle format.
+This script follows the logic in the conversion notebook but imports from the local my_app.nnunet_bundle.
+"""
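+
+# Example invocation (hypothetical dataset ID and paths, for illustration only):
+#
+#   python convert_nnunet_ckpts.py \
+#       --dataset_name_or_id 9 \
+#       --MAP_root ./my_map \
+#       --nnUNet_results /data/nnUNet_results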
+
+import argparse
+import os
+import sys
+
+# Add the current directory to the path to find the local module
+current_dir = os.path.dirname(os.path.abspath(__file__))
+if current_dir not in sys.path:
+    sys.path.insert(0, current_dir)
+
+# Try importing from local my_app.nnunet_bundle instead of from MONAI
+try:
+    from my_app.nnunet_bundle import convert_best_nnunet_to_monai_bundle
+except ImportError:
+    # If local import fails, try to find the module in alternate locations
+    try:
+        from monai.apps.nnunet_bundle import convert_best_nnunet_to_monai_bundle
+    except ImportError:
+        print(
+            "Error: Could not import convert_best_nnunet_to_monai_bundle from my_app.nnunet_bundle or monai.apps.nnunet_bundle"
+        )
+        print("Please ensure that nnunet_bundle.py is properly installed in your project.")
+        sys.exit(1)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Convert nnUNet checkpoints to MONAI bundle format.")
+    parser.add_argument(
+        "--dataset_name_or_id", type=str, required=True, help="The name or ID of the dataset to convert."
+    )
+    parser.add_argument(
+        "--MAP_root",
+        type=str,
+        default=os.getcwd(),
+        help="The root directory where the Medical Application Package (MAP) will be created. Defaults to current directory.",
+    )
+
+    parser.add_argument(
+        "--nnUNet_results",
+        type=str,
+        required=False,
+        default=None,
+        help="Path to nnUNet results directory with trained models.",
+    )
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+
+    # Create the nnUNet config dictionary
+    nnunet_config = {
+        "dataset_name_or_id": args.dataset_name_or_id,
+    }
+
+    # Create the MAP root directory
+    map_root = args.MAP_root
+    os.makedirs(map_root, exist_ok=True)
+
+    # Set nnUNet environment variables if provided
+    if args.nnUNet_results:
+        os.environ["nnUNet_results"] = args.nnUNet_results
+        print(f"Set nnUNet_results to: {args.nnUNet_results}")
+
+    # Check if required environment variables are set
+    required_env_vars = ["nnUNet_results"]
+    missing_vars = [var for var in required_env_vars if var not in os.environ]
+
+    if missing_vars:
+        print(f"Error: The following required nnUNet environment variables are not set: {', '.join(missing_vars)}")
+        print("Please provide them as arguments or set them in your environment before running this script.")
+        sys.exit(1)
+
+    print(f"Converting nnUNet checkpoints for dataset {nnunet_config['dataset_name_or_id']} to MONAI bundle format...")
+    print(f"MAP will be created at: {map_root}")
+    print(f"  nnUNet_results: {os.environ.get('nnUNet_results')}")
+
+    # Convert the nnUNet checkpoints to MONAI bundle format
+    try:
+        convert_best_nnunet_to_monai_bundle(nnunet_config, map_root)
+        print(f"Successfully converted nnUNet checkpoints to MONAI bundle at: {map_root}/models")
+    except Exception as e:
+        print(f"Error converting nnUNet checkpoints: {e}")
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/apps/development_notes.md b/examples/apps/development_notes.md
new file mode 100644
index 00000000..f1e91b99
--- /dev/null
+++ b/examples/apps/development_notes.md
@@ -0,0 +1,62 @@
+# Development Notes
+
+## Implementation Notes for nnUNet MAP
+
+
+* Initial tests show volume and Dice agreement with the MONAI Bundle; more thorough testing is needed.
+
+1. For each model configuration, the output is written to an .npz file by the nnUNet inference functions.
+
+2. These file paths are then used by the EnsembleProbabilities transform to create the final output.
+
+3. If nnUNet postprocessing is used, use the largest connected component transform in the MAP. There could be minor differences in the implementation; a thorough analysis will be done later.
+
+4. Need to better understand the use of "context" in compute and compute_impl as input arguments.
+
+5. Investigate keeping the probabilities in memory, to help with speedup.
+
+6. Need to investigate the current traceability provisions in the operators implemented.
+
+
+## Implementation Details
+
+### Testing Strategy
+
+Tests should be conducted to:
+1. Compare MAP output with native nnUNet output
+2. Measure performance (time, memory usage)
+3. Validate with various input formats and sizes
+4. Test error handling and edge cases
+
+
+### nnUNet Integration
+
+The current implementation relies on nnUNet's native inference approach, which outputs intermediate .npz files for each model configuration. While this works, it introduces file I/O overhead which could potentially be optimized.
+
+### Ensemble Prediction Flow
+
+1. Multiple nnUNet models (3d_fullres, 3d_lowres, 3d_cascade_fullres) are loaded
+2. Each model performs inference separately
+3. Results are written to temporary .npz files
+4. EnsembleProbabilitiesToSegmentation transform reads these files
+5. Final segmentation is produced by combining results (a sketch of this step follows below)
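+
+To make the combining step concrete, a minimal sketch of the probability averaging is shown
+below. The file names and the `.npz` key are illustrative assumptions; the actual MAP delegates
+this step to the `EnsembleProbabilitiesToSegmentation` transform and nnUNet's label manager.
+
+```python
+import numpy as np
+
+# One probability file per model configuration (hypothetical paths)
+prob_files = ["3d_fullres.npz", "3d_lowres.npz", "3d_cascade_fullres.npz"]
+
+# Each file is assumed to hold a softmax probability map of shape (C, X, Y, Z)
+probs = [np.load(f)["probabilities"] for f in prob_files]
+
+avg = np.mean(probs, axis=0)  # average probabilities across models
+segmentation = np.argmax(avg, axis=0)  # most likely class per voxel
+```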
+
+### Potential Optimizations
+
+- Keep probability maps in memory instead of writing to disk
+- Parallelize model inference where applicable
+- Streamline the ensemble computation process
+
+### Context Usage
+
+The `context` parameter in `compute` and `compute_impl` functions appears to be used for storing and retrieving models. Further investigation is needed to fully understand how this context is managed and whether it's being used optimally.
+
+### Traceability
+
+Current traceability in the operators may need improvement. Consider adding:
+
+- More detailed logging
+- Performance metrics
+- Input/output validation steps
+- Error handling with informative messages
+

From f9420e43c61c7e1f2955999137d531d3937eeba5 Mon Sep 17 00:00:00 2001
From: chezhia 
Date: Thu, 30 Oct 2025 00:37:00 -0400
Subject: [PATCH 17/21] all tests pass

Signed-off-by: chezhia 
---
 .../my_app/nnunet_bundle.py                   | 48 +++++++++++--------
 .../my_app/nnunet_seg_operator.py             | 19 +++-----
 .../operators/dicom_seg_writer_operator.py    |  4 +-
 3 files changed, 35 insertions(+), 36 deletions(-)

diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py
index 50c21001..6e24b7a3 100644
--- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py
+++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_bundle.py
@@ -13,7 +13,7 @@
 import os
 import shutil
 from pathlib import Path
-from typing import Any, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import torch
@@ -24,6 +24,7 @@
 
 join, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="join")
 load_json, _ = optional_import("batchgenerators.utilities.file_and_folder_operations", name="load_json")
+nnunet_predictor_cls, _ = optional_import("nnunetv2.inference.predict_from_raw_data", name="nnUNetPredictor")
 
 __all__ = [
     "get_nnunet_trainer",
@@ -43,7 +44,7 @@
 
 # Convert a single nnUNet model checkpoint to MONAI bundle format
 # The function saves the converted model checkpoint and configuration files in the specified bundle root folder.
-def convert_nnunet_to_monai_bundle(nnunet_config: dict, bundle_root_folder: str, fold: int = 0) -> None:
+def convert_nnunet_to_monai_bundle(nnunet_config: Dict[str, Any], bundle_root_folder: str, fold: int = 0) -> None:
     """
     Convert nnUNet model checkpoints and configuration to MONAI bundle format.
 
@@ -133,7 +134,7 @@ def convert_nnunet_to_monai_bundle(nnunet_config: dict, bundle_root_folder: str,
 # The function iterates through all folds and configurations, converting each model to the specified bundle format.
 # The number of folds, configurations, plans and dataset.json will be parsed from the nnunet folder
 def convert_best_nnunet_to_monai_bundle(
-    nnunet_config: dict, bundle_root_folder: str, inference_info_file: str = "inference_information.json"
+    nnunet_config: Dict[str, Any], bundle_root_folder: str, inference_info_file: str = "inference_information.json"
 ) -> None:
     """
     Convert all nnUNet models (configs and folds) to MONAI bundle format.
@@ -527,9 +528,9 @@ def get_nnunet_trainer( def get_nnunet_monai_predictor( model_folder: Union[str, Path], model_name: str = "model.pt", - dataset_json: dict = None, - plans: dict = None, - nnunet_config: dict = None, + dataset_json: Optional[Dict[Any, Any]] = None, + plans: Optional[Dict[Any, Any]] = None, + nnunet_config: Optional[Dict[Any, Any]] = None, save_probabilities: bool = False, save_files: bool = False, use_folds: Optional[Union[int, str]] = None, @@ -606,7 +607,7 @@ def get_nnunet_monai_predictor( def get_nnunet_monai_predictors_for_ensemble( - model_list: list, + model_list: List[Any], model_path: Union[str, Path], model_name: str = "model.pt", use_folds: Optional[Union[int, str]] = None, @@ -626,8 +627,6 @@ def get_nnunet_monai_predictors_for_ensemble( return tuple(network_list) -from typing import Dict, List - from nnunetv2.ensembling.ensemble import average_probabilities from nnunetv2.utilities.plans_handling.plans_handler import PlansManager @@ -658,13 +657,14 @@ def __init__( self.label_manager = self.plans_manager.get_label_manager(self.dataset_json) self.output_key = output_key - def _load_json(self, path: str) -> Dict: + def _load_json(self, path: str) -> Dict[Any, Any]: import json with open(path, "r") as f: - return json.load(f) + result = json.load(f) + return dict(result) # Ensure return type matches annotation - def __call__(self, data: Dict) -> Dict: + def __call__(self, data: Dict[Any, Any]) -> Dict[Any, Any]: d = dict(data) all_files = [] for key in self.keys: @@ -733,16 +733,16 @@ class ModelnnUNetWrapper(torch.nn.Module): def __init__( self, - predictor: object, + predictor: Any, # nnUNetPredictor type, but using Any to avoid import issues model_folder: Union[str, Path], - checkpoint_name: str = None, - dataset_json: dict = None, - plans: dict = None, - nnunet_config: dict = None, + checkpoint_name: Optional[str] = None, + dataset_json: Optional[Dict[Any, Any]] = None, + plans: Optional[Dict[Any, Any]] = None, + nnunet_config: Optional[Dict[Any, Any]] = None, save_probabilities: bool = False, save_files: bool = False, tmp_dir: str = "tmp", - use_folds: Union[int, str, Tuple[Union[int, str], ...], List[Union[int, str]]] = None, + use_folds: Optional[Union[int, str, Tuple[Union[int, str], ...], List[Union[int, str]]]] = None, ): super().__init__() @@ -802,6 +802,10 @@ def __init__( if use_folds is None: use_folds = self.predictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) + # Ensure use_folds is always iterable + if not isinstance(use_folds, (list, tuple)): + use_folds = [use_folds] + # Load model parameters from each fold for f in use_folds: f = int(f) if f != "all" else f @@ -928,14 +932,14 @@ def forward(self, x: MetaTensor) -> MetaTensor: raise ValueError(f"Unexpected affine shape: {x.meta['affine'].shape}") # Calculate spacing, origin and direction - spacing = tuple(np.linalg.norm(affine[:3, i]) for i in range(3)) + spacing_tuple = tuple(float(np.linalg.norm(affine[:3, i])) for i in range(3)) origin = tuple(float(v) for v in affine[:3, 3]) - direction_matrix = affine[:3, :3] / spacing + direction_matrix = affine[:3, :3] / np.array(spacing_tuple) direction = tuple(direction_matrix.flatten().round(6)) # Add to properties dict for SimpleITK properties_or_list_of_properties["sitk_stuff"] = { - "spacing": spacing, + "spacing": spacing_tuple, "origin": origin, "direction": direction, } @@ -980,6 +984,8 @@ def forward(self, x: MetaTensor) -> MetaTensor: return MetaTensor(out_tensor, meta=x.meta) else: # Return a placeholder tensor 
with file path in metadata + if outfile is None: + raise ValueError("Output file path is None when save_files is True") saved_path = outfile + ".npz" if not os.path.exists(saved_path): raise FileNotFoundError(f"Expected saved file not found: {saved_path}") diff --git a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py index cb7d57b4..4d000dd1 100644 --- a/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py +++ b/examples/apps/cchmc_nnunet_fifteen_ckpt_app/my_app/nnunet_seg_operator.py @@ -11,7 +11,7 @@ import logging from pathlib import Path -from typing import Dict, List +from typing import Dict, List, Optional import torch from numpy import int16, uint8 @@ -35,15 +35,8 @@ if current_dir not in sys.path: sys.path.insert(0, current_dir) -try: - # Try local version first - from nnunet_bundle import EnsembleProbabilitiesToSegmentation, get_nnunet_monai_predictors_for_ensemble -except ImportError: - # Fall back to MONAI version if local version fails - from monai.apps.nnunet.nnunet_bundle import ( - get_nnunet_monai_predictors_for_ensemble, - EnsembleProbabilitiesToSegmentation, - ) +# Import from local nnunet_bundle module +from nnunet_bundle import EnsembleProbabilitiesToSegmentation, get_nnunet_monai_predictors_for_ensemble from monai.deploy.core import AppContext, Fragment, Model, Operator, OperatorSpec from monai.deploy.operators.monai_seg_inference_operator import InMemImageReader @@ -70,8 +63,8 @@ def __init__( app_context: AppContext, model_path: Path, output_folder: Path = DEFAULT_OUTPUT_FOLDER, - output_labels: List[int] = None, - model_list: List[str] = None, + output_labels: Optional[List[int]] = None, + model_list: Optional[List[str]] = None, model_name: str = "best_model.pt", save_probabilities: bool = False, save_files: bool = False, @@ -424,5 +417,5 @@ def get_result_text_from_transforms(self, post_transforms: Compose) -> str: """ for transform in post_transforms.transforms: if isinstance(transform, ExtractVolumeToTextd): - return transform.result_text + return str(transform.result_text) # Ensure return type is str return "" diff --git a/monai/deploy/operators/dicom_seg_writer_operator.py b/monai/deploy/operators/dicom_seg_writer_operator.py index e96490c1..21b3a51f 100644 --- a/monai/deploy/operators/dicom_seg_writer_operator.py +++ b/monai/deploy/operators/dicom_seg_writer_operator.py @@ -341,8 +341,8 @@ def create_dicom_seg(self, image: np.ndarray, dicom_series: DICOMSeries, output_ # Adding a few tags that are not in the Dataset # Also try to set the custom tags that are of string type dt_now = datetime.datetime.now() - seg.SeriesDate = dt_now.strftime("%Y%m%d") - seg.SeriesTime = dt_now.strftime("%H%M%S") + seg.SeriesDate = dt_now.strftime("%Y%m%d") # type: ignore[assignment] + seg.SeriesTime = dt_now.strftime("%H%M%S") # type: ignore[assignment] seg.TimezoneOffsetFromUTC = ( dt_now.astimezone().isoformat()[-6:].replace(":", "") ) # '2022-09-27T22:36:20.143857-07:00' From 7ff7e12fe845ed761f39ba2adbb8ee128869a75d Mon Sep 17 00:00:00 2001 From: Ming M Qin <38891913+MMelQin@users.noreply.github.com> Date: Thu, 30 Oct 2025 15:48:09 -0700 Subject: [PATCH 18/21] Make affine and space metadata consistent as well as updating support of latest holoscan SDK CUDA 12 version (#565) * Make affine and space consistent as space is properly parse by MONAI transforms Signed-off-by: M Q * Typing improvements Signed-off-by: M Q * Make image metadata Affine and Space 
consistent, either LPS or RAS Signed-off-by: M Q * Update monai/deploy/operators/monai_seg_inference_operator.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Ming M Qin <38891913+MMelQin@users.noreply.github.com> * Correct CoPilot suggested code which failed liniting Signed-off-by: M Q * Fix complaint from new version of mypy Signed-off-by: M Q * Support the latest holoscan SDK CUDA 12 version, holoscan-cu12 Signed-off-by: M Q * Docs gen works with Python 3.10+ Signed-off-by: M Q * Fix complaints on single quote vs dhouble quote for string Signed-off-by: M Q * Making docs gen require python >= 3.10 Signed-off-by: M Q * Fix docs build error on readthedocs, although local builds had no issues Signed-off-by: M Q --------- Signed-off-by: M Q Signed-off-by: Ming M Qin <38891913+MMelQin@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .readthedocs.yml | 2 +- CONTRIBUTING.md | 4 - docs/requirements.txt | 45 +- docs/source/conf.py | 18 +- .../spleen_seg_operator.py | 2 +- .../ai_unetr_seg_app/unetr_seg_operator.py | 2 +- .../abdomen_seg_operator.py | 2 +- .../operators/dicom_seg_writer_operator.py | 6 +- .../dicom_series_to_volume_operator.py | 37 +- .../operators/monai_seg_inference_operator.py | 19 +- notebooks/tutorials/03_segmentation_app.ipynb | 1722 ++++++++++++++--- platforms/nuance_pin/app/inference.py | 2 +- requirements.txt | 4 +- run | 6 +- setup.cfg | 6 +- 15 files changed, 1574 insertions(+), 303 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 59c6d31c..44636e37 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -8,7 +8,7 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "3.9" + python: "3.10" # You can also specify other tool versions: # nodejs: "20" # rust: "1.70" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index eaab3990..1f918f28 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -137,10 +137,6 @@ MONAI Deploy App SDK's code coverage report is available at [CodeCov](https://co #### Building the documentation -:::{note} -Please note that the documentation builds successfully in Python 3.9 environment, but fails with Python 3.10. -::: - MONAI's documentation is located at `docs/`. ```bash diff --git a/docs/requirements.txt b/docs/requirements.txt index 6f6372db..4e01adee 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,25 +1,24 @@ -Sphinx==4.1.2 -sphinx-autobuild==2021.3.14 -myst-nb==0.17.2 # this version is fine in python 3.9 and avoids pulling in multiple nbformat packages -myst-parser==0.18.0 +Sphinx>=4.5.0 +sphinx-autobuild +myst-nb>=0.17.2 +myst-parser>=0.18.0 lxml_html_clean # needed by myst-nb -linkify-it-py==1.0.1 # https://myst-parser.readthedocs.io/en/latest/syntax/optional.html?highlight=linkify#linkify -sphinx-togglebutton==0.2.3 -sphinx-copybutton==0.4.0 -sphinxcontrib-bibtex<2.0.0 # https://github.com/executablebooks/jupyter-book/issues/1137 -sphinxcontrib-spelling==7.2.1 # https://sphinxcontrib-spelling.readthedocs.io/en/latest/index.html -sphinx-thebe==0.0.10 -sphinx-panels==0.6.0 -ablog==0.10.19 -docutils==0.16 # 0.17 causes error. 
https://github.com/executablebooks/MyST-Parser/issues/343 -pydata_sphinx_theme==0.6.3 -sphinxemoji==0.1.8 +linkify-it-py>=1.0.1 # https://myst-parser.readthedocs.io/en/latest/syntax/optional.html?highlight=linkify#linkify +sphinx-togglebutton +sphinx-copybutton +sphinxcontrib-bibtex>=2.4.1 # Version 2.4.1+ supports Python 3.10 +sphinxcontrib-spelling # https://sphinxcontrib-spelling.readthedocs.io/en/latest/index.html +sphinx-thebe +sphinx-design +ablog +pydata_sphinx_theme>=0.13.0 +sphinxemoji torch>=1.12.0 -sphinx-autodoc-typehints==1.12.0 -sphinxcontrib-applehelp==1.0.2 -sphinxcontrib-devhelp==1.0.2 -sphinxcontrib-htmlhelp==2.0.0 -sphinxcontrib-jsmath==1.0.1 -sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.5 -sphinxcontrib-mermaid==0.7.1 +sphinx-autodoc-typehints +sphinxcontrib-applehelp +sphinxcontrib-devhelp +sphinxcontrib-htmlhelp +sphinxcontrib-jsmath +sphinxcontrib-qthelp +sphinxcontrib-serializinghtml +sphinxcontrib-mermaid diff --git a/docs/source/conf.py b/docs/source/conf.py index 0f5a4f6a..997e389a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -83,7 +83,7 @@ "myst_nb", "sphinx_copybutton", "sphinx_togglebutton", - "sphinx_panels", # https://sphinx-panels.readthedocs.io/en/latest/ + "sphinx_design", # https://sphinx-design.readthedocs.io/en/latest/ "ablog", "sphinxemoji.sphinxemoji", # https://myst-parser.readthedocs.io/en/latest/sphinx/use.html#automatically-create-targets-for-section-headers @@ -91,6 +91,7 @@ "sphinx_autodoc_typehints", "sphinxcontrib.mermaid", ] +bibtex_bibfiles = ["refs.bib"] autoclass_content = "both" add_module_names = True @@ -180,10 +181,10 @@ html_css_files = ["custom.css"] html_title = f"{project} {version} Documentation" -# -- Options for sphinx-panels ------------------------------------------------- +# -- Options for sphinx-design ------------------------------------------------- # -# (reference: https://sphinx-panels.readthedocs.io/en/latest/) -panels_add_bootstrap_css = False # pydata-sphinx-theme already loads bootstrap css +# (reference: https://sphinx-design.readthedocs.io/en/latest/) +# No additional configuration needed - sphinx-design works with pydata-sphinx-theme # -- Options for linkcheck builder ------------------------------------------------- # @@ -222,16 +223,17 @@ # -- Options for myst-nb ------------------------------------------------- # # (reference: https://myst-nb.readthedocs.io/en/latest/) -# Prevent the following error -# MyST NB Configuration Error: -# `nb_render_priority` not set for builder: doctest -nb_render_priority = {"doctest": ()} # Prevent creating jupyter_execute folder in dist # https://myst-nb.readthedocs.io/en/latest/use/execute.html#executing-in-temporary-folders # noqa execution_in_temp = True jupyter_execute_notebooks = "off" +# -- Options for sphinxcontrib.bibtex ------------------------------------------------- +# +# (reference: https://sphinxcontrib-bibtex.readthedocs.io/) +bibtex_bibfiles = [] # Add bibliography files here if needed + # -- Options for sphinxcontrib.spelling ------------------------------------------------- # # (reference: https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html) diff --git a/examples/apps/ai_remote_infer_app/spleen_seg_operator.py b/examples/apps/ai_remote_infer_app/spleen_seg_operator.py index 197cd58f..814feebb 100644 --- a/examples/apps/ai_remote_infer_app/spleen_seg_operator.py +++ b/examples/apps/ai_remote_infer_app/spleen_seg_operator.py @@ -127,7 +127,7 @@ def pre_process(self, img_reader, out_dir: str = 
"./input_images") -> Compose: resample=False, output_ext=".nii", ), - Orientationd(keys=my_key, axcodes="LPS"), + Orientationd(keys=my_key, axcodes="RAS"), Spacingd(keys=my_key, pixdim=[1.5, 1.5, 2.9], mode=["bilinear"]), ScaleIntensityRanged(keys=my_key, a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), EnsureTyped(keys=my_key), diff --git a/examples/apps/ai_unetr_seg_app/unetr_seg_operator.py b/examples/apps/ai_unetr_seg_app/unetr_seg_operator.py index d31f5c2c..dbbb2b30 100644 --- a/examples/apps/ai_unetr_seg_app/unetr_seg_operator.py +++ b/examples/apps/ai_unetr_seg_app/unetr_seg_operator.py @@ -143,7 +143,7 @@ def pre_process(self, img_reader, out_dir: str = "./input_images") -> Compose: output_ext=".nii", ), Spacingd(keys=my_key, pixdim=(1.5, 1.5, 2.0), mode=("bilinear")), - Orientationd(keys=my_key, axcodes="LPS"), + Orientationd(keys=my_key, axcodes="RAS"), ScaleIntensityRanged(my_key, a_min=-175, a_max=250, b_min=0.0, b_max=1.0, clip=True), CropForegroundd(my_key, source_key=my_key), ] diff --git a/examples/apps/cchmc_ped_abd_ct_seg_app/abdomen_seg_operator.py b/examples/apps/cchmc_ped_abd_ct_seg_app/abdomen_seg_operator.py index 6dabfe5e..2f412f14 100644 --- a/examples/apps/cchmc_ped_abd_ct_seg_app/abdomen_seg_operator.py +++ b/examples/apps/cchmc_ped_abd_ct_seg_app/abdomen_seg_operator.py @@ -226,7 +226,7 @@ def pre_process(self, img_reader) -> Compose: # img_reader: specialized InMemImageReader, derived from MONAI ImageReader LoadImaged(keys=my_key, reader=img_reader), EnsureChannelFirstd(keys=my_key), - Orientationd(keys=my_key, axcodes="LPS"), + Orientationd(keys=my_key, axcodes="RAS"), Spacingd(keys=my_key, pixdim=[1.5, 1.5, 3.0], mode=["bilinear"]), ScaleIntensityRanged(keys=my_key, a_min=-250, a_max=400, b_min=0.0, b_max=1.0, clip=True), CropForegroundd(keys=my_key, source_key=my_key, mode="minimum"), diff --git a/monai/deploy/operators/dicom_seg_writer_operator.py b/monai/deploy/operators/dicom_seg_writer_operator.py index 21b3a51f..ff1c1d95 100644 --- a/monai/deploy/operators/dicom_seg_writer_operator.py +++ b/monai/deploy/operators/dicom_seg_writer_operator.py @@ -27,6 +27,8 @@ ImplicitVRLittleEndian, _ = optional_import("pydicom.uid", name="ImplicitVRLittleEndian") Dataset, _ = optional_import("pydicom.dataset", name="Dataset") FileDataset, _ = optional_import("pydicom.dataset", name="FileDataset") +DA, _ = optional_import("pydicom.valuerep", name="DA") +TM, _ = optional_import("pydicom.valuerep", name="TM") PyDicomSequence, _ = optional_import("pydicom.sequence", name="Sequence") sitk, _ = optional_import("SimpleITK") codes, _ = optional_import("pydicom.sr.codedict", name="codes") @@ -341,8 +343,8 @@ def create_dicom_seg(self, image: np.ndarray, dicom_series: DICOMSeries, output_ # Adding a few tags that are not in the Dataset # Also try to set the custom tags that are of string type dt_now = datetime.datetime.now() - seg.SeriesDate = dt_now.strftime("%Y%m%d") # type: ignore[assignment] - seg.SeriesTime = dt_now.strftime("%H%M%S") # type: ignore[assignment] + seg.SeriesDate = DA(dt_now.strftime("%Y%m%d")) + seg.SeriesTime = TM(dt_now.strftime("%H%M%S")) seg.TimezoneOffsetFromUTC = ( dt_now.astimezone().isoformat()[-6:].replace(":", "") ) # '2022-09-27T22:36:20.143857-07:00' diff --git a/monai/deploy/operators/dicom_series_to_volume_operator.py b/monai/deploy/operators/dicom_series_to_volume_operator.py index dc49fbdb..5a06ec52 100644 --- a/monai/deploy/operators/dicom_series_to_volume_operator.py +++ b/monai/deploy/operators/dicom_series_to_volume_operator.py @@ 
-40,19 +40,26 @@ class DICOMSeriesToVolumeOperator(Operator): """ # Use constants instead of enums in monai to avoid dependency at this level. + MONAI_UTIL_ENUMS_SPACEKEYS_RAS = "RAS" MONAI_UTIL_ENUMS_SPACEKEYS_LPS = "LPS" MONAI_TRANSFORMS_SPATIAL_METADATA_NAME = "space" + METADATA_SPACE_RAS = {MONAI_TRANSFORMS_SPATIAL_METADATA_NAME: MONAI_UTIL_ENUMS_SPACEKEYS_RAS} METADATA_SPACE_LPS = {MONAI_TRANSFORMS_SPATIAL_METADATA_NAME: MONAI_UTIL_ENUMS_SPACEKEYS_LPS} + ATTRIBUTE_NIFTI_AFFINE = "nifti_affine_transform" + ATTRIBUTE_DICOM_AFFINE = "dicom_affine_transform" - def __init__(self, fragment: Fragment, *args, **kwargs): + def __init__(self, fragment: Fragment, *args, affine_lps_to_ras: bool = True, **kwargs): """Create an instance for a containing application object. Args: fragment (Fragment): An instance of the Application class which is derived from Fragment. + affine_lps_to_ras (bool): If true, the affine transform in the image metadata is RAS oriented, + otherwise it is LPS oriented. Default is True. """ self.input_name_series = "study_selected_series_list" self.output_name_image = "image" + self.affine_lps_to_ras = affine_lps_to_ras # Need to call the base class constructor last super().__init__(fragment, *args, **kwargs) @@ -89,18 +96,16 @@ def convert_to_image(self, study_selected_series_list: List[StudySelectedSeries] metadata.update(self._get_instance_properties(study_selected_series.study)) selection_metadata = {"selection_name": selection_name} metadata.update(selection_metadata) - # Add the metadata to specify LPS. - # Previously, this was set in ImageReader class, but moving it here allows other loaders - # to determine this value on its own, e.g. NIfTI loader but it does not set this - # resulting in the MONAI Orientation transform to default the labels to RAS. - # It is assumed that the ImageOrientationPatient will be set accordingly if the - # PatientPosition is other than HFS. - # NOTE: This value is properly parsed by MONAI Orientation transform from v1.5.1 onwards. - # Some early MONAI model inference configs incorrectly specify orientation to RAS - # due part to previous MONAI versions did not correctly parse this metadata from - # the input MetaTensor and defaulting to RAS. Now with LPS properly set, the inference - # configs then need to be updated to specify LPS, to achieve the same result. - metadata.update(self.METADATA_SPACE_LPS) + # The affine transform and the coordinate space are set based on the flag affine_lps_to_ras. + # If the flag is true, the NIFTI affine (RAS) is used, otherwise the DICOM affine (LPS) is used. 
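+        # A minimal sketch of the relationship, assuming 4x4 homogeneous affines (for
+        # illustration only): the RAS affine is the LPS affine with its first two rows
+        # negated, e.g. ras = lps.copy(); ras[0, :] *= -1; ras[1, :] *= -1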
+ if self.affine_lps_to_ras: + if hasattr(dicom_series, self.ATTRIBUTE_NIFTI_AFFINE): + metadata["affine"] = getattr(dicom_series, self.ATTRIBUTE_NIFTI_AFFINE) + metadata.update(self.METADATA_SPACE_RAS) + else: + if hasattr(dicom_series, self.ATTRIBUTE_DICOM_AFFINE): + metadata["affine"] = getattr(dicom_series, self.ATTRIBUTE_DICOM_AFFINE) + metadata.update(self.METADATA_SPACE_LPS) voxel_data = self.generate_voxel_data(dicom_series) image = self.create_volumetric_image(voxel_data, metadata) @@ -366,7 +371,7 @@ def compute_affine_transform(self, s_1, s_n, n, series): zn = 0.0 ip1 = None - ip2 = None + ipn = None try: ip1_de = s_1[0x0020, 0x0032] ipn_de = s_n[0x0020, 0x0032] @@ -404,7 +409,7 @@ def compute_affine_transform(self, s_1, s_n, n, series): m1[3, 2] = 0 m1[3, 3] = 1 - series.dicom_affine_transform = m1 + setattr(series, self.ATTRIBUTE_DICOM_AFFINE, m1) m2[0, 0] = -rx * vr m2[0, 1] = -cx * vc @@ -426,7 +431,7 @@ def compute_affine_transform(self, s_1, s_n, n, series): m2[3, 2] = 0 m2[3, 3] = 1 - series.nifti_affine_transform = m2 + setattr(series, self.ATTRIBUTE_NIFTI_AFFINE, m2) def create_metadata(self, series) -> Dict: """Collects all relevant metadata from the DICOM Series and creates a dictionary. diff --git a/monai/deploy/operators/monai_seg_inference_operator.py b/monai/deploy/operators/monai_seg_inference_operator.py index 6e9bbde8..261be8c0 100644 --- a/monai/deploy/operators/monai_seg_inference_operator.py +++ b/monai/deploy/operators/monai_seg_inference_operator.py @@ -31,16 +31,20 @@ ImageReader: Any = ImageReader_ if not image_reader_ok_: ImageReader = object # for 'class InMemImageReader(ImageReader):' to work +is_no_channel, _ = optional_import("monai.data.utils", name="is_no_channel") decollate_batch, _ = optional_import("monai.data", name="decollate_batch") sliding_window_inference, _ = optional_import("monai.inferers", name="sliding_window_inference") simple_inference, _ = optional_import("monai.inferers", name="SimpleInferer") ensure_tuple, _ = optional_import(MONAI_UTILS, name="ensure_tuple") MetaKeys, _ = optional_import(MONAI_UTILS, name="MetaKeys") SpaceKeys, _ = optional_import(MONAI_UTILS, name="SpaceKeys") +TraceKeys, _ = optional_import(MONAI_UTILS, name="TraceKeys") Compose_, _ = optional_import("monai.transforms", name="Compose") # Dynamic class is not handled so make it Any for now: https://github.com/python/mypy/issues/2477 Compose: Any = Compose_ +cp, has_cp = optional_import("cupy") + from monai.deploy.core import AppContext, Condition, ConditionType, Fragment, Image, OperatorSpec, Resource from .inference_operator import InferenceOperator @@ -362,6 +366,7 @@ def compute(self, op_input, op_output, context): self._executing = True try: input_image = op_input.receive(self._input_name_image) + if not input_image: raise ValueError("Input is None.") op_output.emit(self.compute_impl(input_image, context), self._output_name_seg) @@ -592,7 +597,7 @@ def _get_meta_dict(self, img: Image) -> Dict: return meta_dict -# Reuse MONAI code for the derived ImageReader +# Reuse MONAI code for the derived ImageReader as it is not exposed def _copy_compatible_dict(from_dict: Dict, to_dict: Dict): if not isinstance(to_dict, dict): raise ValueError(f"to_dict must be a Dict, got {type(to_dict)}.") @@ -601,7 +606,9 @@ def _copy_compatible_dict(from_dict: Dict, to_dict: Dict): datum = from_dict[key] if isinstance(datum, np.ndarray) and np_str_obj_array_pattern.search(datum.dtype.str) is not None: continue - to_dict[key] = datum + to_dict[key] = ( + str(TraceKeys.NONE) if 
datum is None else datum + ) # PyTorch's default_collate cannot handle None values directly else: affine_key, shape_key = MetaKeys.AFFINE, MetaKeys.SPATIAL_SHAPE if affine_key in from_dict and not np.allclose(from_dict[affine_key], to_dict[affine_key]): @@ -616,12 +623,16 @@ def _copy_compatible_dict(from_dict: Dict, to_dict: Dict): ) -def _stack_images(image_list: List, meta_dict: Dict): +def _stack_images(image_list: list, meta_dict: Dict, to_cupy: bool = False): if len(image_list) <= 1: return image_list[0] - if meta_dict.get(MetaKeys.ORIGINAL_CHANNEL_DIM, None) not in ("no_channel", None): + if not is_no_channel(meta_dict.get(MetaKeys.ORIGINAL_CHANNEL_DIM, None)): channel_dim = int(meta_dict[MetaKeys.ORIGINAL_CHANNEL_DIM]) + if to_cupy and has_cp: + return cp.concatenate(image_list, axis=channel_dim) return np.concatenate(image_list, axis=channel_dim) # stack at a new first dim as the channel dim, if `'original_channel_dim'` is unspecified meta_dict[MetaKeys.ORIGINAL_CHANNEL_DIM] = 0 + if to_cupy and has_cp: + return cp.stack(image_list, axis=0) return np.stack(image_list, axis=0) diff --git a/notebooks/tutorials/03_segmentation_app.ipynb b/notebooks/tutorials/03_segmentation_app.ipynb index b6bddd17..bdd81226 100644 --- a/notebooks/tutorials/03_segmentation_app.ipynb +++ b/notebooks/tutorials/03_segmentation_app.ipynb @@ -569,7 +569,7 @@ " resample=False,\n", " output_ext=\".nii\",\n", " ),\n", - " Orientationd(keys=my_key, axcodes=\"LPS\"),\n", + " Orientationd(keys=my_key, axcodes=\"RAS\"),\n", " Spacingd(keys=my_key, pixdim=[1.5, 1.5, 2.9], mode=[\"bilinear\"]),\n", " ScaleIntensityRanged(keys=my_key, a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n", " EnsureTyped(keys=my_key),\n", @@ -751,81 +751,85 @@ "output_type": "stream", "text": [ "[info] [fragment.cpp:969] Loading extensions from configs...\n", - "[2025-09-30 18:29:37,547] [INFO] (root) - Parsed args: Namespace(log_level=None, input=None, output=None, model=None, workdir=None, triton_server_netloc=None, argv=[])\n", - "[2025-09-30 18:29:37,553] [INFO] (root) - AppContext object: AppContext(input_path=dcm, output_path=output, model_path=models, workdir=), triton_server_netloc=\n", - "[2025-09-30 18:29:37,554] [INFO] (__main__.AISpleenSegApp) - App input and output path: dcm, output\n", + "[2025-10-30 12:10:07,210] [INFO] (root) - Parsed args: Namespace(log_level=None, input=None, output=None, model=None, workdir=None, triton_server_netloc=None, argv=[])\n", + "[2025-10-30 12:10:07,220] [INFO] (root) - AppContext object: AppContext(input_path=dcm, output_path=output, model_path=models, workdir=), triton_server_netloc=\n", + "[2025-10-30 12:10:07,221] [INFO] (__main__.AISpleenSegApp) - App input and output path: dcm, output\n", "[info] [gxf_executor.cpp:344] Creating context\n", "[info] [gxf_executor.cpp:2508] Activating Graph...\n", "[info] [gxf_executor.cpp:2579] Running Graph...\n", "[info] [gxf_executor.cpp:2581] Waiting for completion...\n", "[info] [greedy_scheduler.cpp:191] Scheduling 5 entities\n", - "[2025-09-30 18:29:37,727] [INFO] (monai.deploy.operators.dicom_data_loader_operator.DICOMDataLoaderOperator) - No or invalid input path from the optional input port: None\n", - "[2025-09-30 18:29:38,077] [INFO] (root) - Finding series for Selection named: CT Series\n", - "[2025-09-30 18:29:38,078] [INFO] (root) - Searching study, : 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291\n", + "[2025-10-30 12:10:07,479] [INFO] (monai.deploy.operators.dicom_data_loader_operator.DICOMDataLoaderOperator) - 
No or invalid input path from the optional input port: None\n", + "[2025-10-30 12:10:07,781] [INFO] (root) - Finding series for Selection named: CT Series\n", + "[2025-10-30 12:10:07,782] [INFO] (root) - Searching study, : 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291\n", " # of series: 1\n", - "[2025-09-30 18:29:38,079] [INFO] (root) - Working on series, instance UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", - "[2025-09-30 18:29:38,080] [INFO] (root) - On attribute: 'StudyDescription' to match value: '(.*?)'\n", - "[2025-09-30 18:29:38,081] [INFO] (root) - Series attribute StudyDescription value: CT ABDOMEN W IV CONTRAST\n", - "[2025-09-30 18:29:38,083] [INFO] (root) - On attribute: 'Modality' to match value: '(?i)CT'\n", - "[2025-09-30 18:29:38,084] [INFO] (root) - Series attribute Modality value: CT\n", - "[2025-09-30 18:29:38,085] [INFO] (root) - On attribute: 'SeriesDescription' to match value: '(.*?)'\n", - "[2025-09-30 18:29:38,087] [INFO] (root) - Series attribute SeriesDescription value: ABD/PANC 3.0 B31f\n", - "[2025-09-30 18:29:38,088] [INFO] (root) - On attribute: 'ImageType' to match value: ['PRIMARY', 'ORIGINAL']\n", - "[2025-09-30 18:29:38,089] [INFO] (root) - Series attribute ImageType value: None\n", - "[2025-09-30 18:29:38,091] [INFO] (root) - Instance level attribute ImageType value: [\"['ORIGINAL', 'PRIMARY', 'AXIAL', 'CT_SOM5 SPI']\"]\n", - "[2025-09-30 18:29:38,092] [INFO] (root) - Selected Series, UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", - "[2025-09-30 18:29:38,093] [INFO] (root) - Series Selection finalized\n", - "[2025-09-30 18:29:38,094] [INFO] (root) - Series Description of selected DICOM Series for inference: ABD/PANC 3.0 B31f\n", - "[2025-09-30 18:29:38,095] [INFO] (root) - Series Instance UID of selected DICOM Series for inference: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 12:10:07,783] [INFO] (root) - Working on series, instance UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 12:10:07,784] [INFO] (root) - On attribute: 'StudyDescription' to match value: '(.*?)'\n", + "[2025-10-30 12:10:07,785] [INFO] (root) - Series attribute StudyDescription value: CT ABDOMEN W IV CONTRAST\n", + "[2025-10-30 12:10:07,785] [INFO] (root) - On attribute: 'Modality' to match value: '(?i)CT'\n", + "[2025-10-30 12:10:07,786] [INFO] (root) - Series attribute Modality value: CT\n", + "[2025-10-30 12:10:07,787] [INFO] (root) - On attribute: 'SeriesDescription' to match value: '(.*?)'\n", + "[2025-10-30 12:10:07,788] [INFO] (root) - Series attribute SeriesDescription value: ABD/PANC 3.0 B31f\n", + "[2025-10-30 12:10:07,788] [INFO] (root) - On attribute: 'ImageType' to match value: ['PRIMARY', 'ORIGINAL']\n", + "[2025-10-30 12:10:07,789] [INFO] (root) - Series attribute ImageType value: None\n", + "[2025-10-30 12:10:07,790] [INFO] (root) - Instance level attribute ImageType value: [\"['ORIGINAL', 'PRIMARY', 'AXIAL', 'CT_SOM5 SPI']\"]\n", + "[2025-10-30 12:10:07,791] [INFO] (root) - Selected Series, UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 12:10:07,792] [INFO] (root) - Series Selection finalized\n", + "[2025-10-30 12:10:07,792] [INFO] (root) - Series Description of selected DICOM Series for inference: ABD/PANC 3.0 B31f\n", + "[2025-10-30 12:10:07,793] [INFO] (root) - Series Instance UID of selected DICOM Series for inference: 
1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/utils/deprecate_utils.py:321: FutureWarning: monai.transforms.spatial.dictionary Orientationd.__init__:labels: Current default value of argument `labels=(('L', 'R'), ('P', 'A'), ('I', 'S'))` was changed in version None from `labels=(('L', 'R'), ('P', 'A'), ('I', 'S'))` to `labels=None`. Default value changed to None meaning that the transform now uses the 'space' of a meta-tensor, if applicable, to determine appropriate axis labels.\n", " warn_deprecated(argname, msg, warning_category)\n", - "[2025-09-30 18:29:38,529] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Converted Image object metadata:\n", - "[2025-09-30 18:29:38,529] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239, type \n", - "[2025-09-30 18:29:38,530] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDate: 20090831, type \n", - "[2025-09-30 18:29:38,530] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesTime: 101721.452, type \n", - "[2025-09-30 18:29:38,531] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Modality: CT, type \n", - "[2025-09-30 18:29:38,531] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDescription: ABD/PANC 3.0 B31f, type \n", - "[2025-09-30 18:29:38,532] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - PatientPosition: HFS, type \n", - "[2025-09-30 18:29:38,533] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesNumber: 8, type \n", - "[2025-09-30 18:29:38,533] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_pixel_spacing: 0.7890625, type \n", - "[2025-09-30 18:29:38,535] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_pixel_spacing: 0.7890625, type \n", - "[2025-09-30 18:29:38,536] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_pixel_spacing: 1.5, type \n", - "[2025-09-30 18:29:38,537] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_direction_cosine: [1.0, 0.0, 0.0], type \n", - "[2025-09-30 18:29:38,538] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_direction_cosine: [0.0, 1.0, 0.0], type \n", - "[2025-09-30 18:29:38,538] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_direction_cosine: [0.0, 0.0, 1.0], type \n", - "[2025-09-30 18:29:38,540] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - dicom_affine_transform: [[ 0.7890625 0. 0. 
-197.60547 ]\n", + "[2025-10-30 12:10:08,151] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Converted Image object metadata:\n", + "[2025-10-30 12:10:08,152] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239, type \n", + "[2025-10-30 12:10:08,153] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDate: 20090831, type \n", + "[2025-10-30 12:10:08,154] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesTime: 101721.452, type \n", + "[2025-10-30 12:10:08,154] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Modality: CT, type \n", + "[2025-10-30 12:10:08,155] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDescription: ABD/PANC 3.0 B31f, type \n", + "[2025-10-30 12:10:08,156] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - PatientPosition: HFS, type \n", + "[2025-10-30 12:10:08,156] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesNumber: 8, type \n", + "[2025-10-30 12:10:08,157] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_pixel_spacing: 0.7890625, type \n", + "[2025-10-30 12:10:08,158] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_pixel_spacing: 0.7890625, type \n", + "[2025-10-30 12:10:08,159] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_pixel_spacing: 1.5, type \n", + "[2025-10-30 12:10:08,160] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_direction_cosine: [1.0, 0.0, 0.0], type \n", + "[2025-10-30 12:10:08,161] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_direction_cosine: [0.0, 1.0, 0.0], type \n", + "[2025-10-30 12:10:08,162] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_direction_cosine: [0.0, 0.0, 1.0], type \n", + "[2025-10-30 12:10:08,164] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - dicom_affine_transform: [[ 0.7890625 0. 0. -197.60547 ]\n", " [ 0. 0.7890625 0. -398.60547 ]\n", " [ 0. 0. 1.5 -383. ]\n", " [ 0. 0. 0. 1. ]], type \n", - "[2025-09-30 18:29:38,541] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - nifti_affine_transform: [[ -0.7890625 -0. -0. 197.60547 ]\n", + "[2025-10-30 12:10:08,165] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - nifti_affine_transform: [[ -0.7890625 -0. -0. 197.60547 ]\n", " [ -0. -0.7890625 -0. 398.60547 ]\n", " [ 0. 0. 1.5 -383. ]\n", " [ 0. 0. 0. 1. 
]], type \n", - "[2025-09-30 18:29:38,542] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291, type \n", - "[2025-09-30 18:29:38,543] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyID: , type \n", - "[2025-09-30 18:29:38,546] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDate: 20090831, type \n", - "[2025-09-30 18:29:38,547] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyTime: 095948.599, type \n", - "[2025-09-30 18:29:38,548] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDescription: CT ABDOMEN W IV CONTRAST, type \n", - "[2025-09-30 18:29:38,548] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - AccessionNumber: 5471978513296937, type \n", - "[2025-09-30 18:29:38,549] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - selection_name: CT Series, type \n", - "[2025-09-30 18:29:38,549] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - space: LPS, type \n" + "[2025-10-30 12:10:08,166] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291, type \n", + "[2025-10-30 12:10:08,167] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyID: , type \n", + "[2025-10-30 12:10:08,168] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDate: 20090831, type \n", + "[2025-10-30 12:10:08,169] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyTime: 095948.599, type \n", + "[2025-10-30 12:10:08,170] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDescription: CT ABDOMEN W IV CONTRAST, type \n", + "[2025-10-30 12:10:08,171] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - AccessionNumber: 5471978513296937, type \n", + "[2025-10-30 12:10:08,171] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - selection_name: CT Series, type \n", + "[2025-10-30 12:10:08,173] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - affine: [[ -0.7890625 -0. -0. 197.60547 ]\n", + " [ -0. -0.7890625 -0. 398.60547 ]\n", + " [ 0. 0. 1.5 -383. ]\n", + " [ 0. 0. 0. 1. 
]], type \n", + "[2025-10-30 12:10:08,173] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - space: RAS, type \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "2025-09-30 18:29:39,201 INFO image_writer.py:197 - writing: /home/mqin/src/md-app-sdk/notebooks/tutorials/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626.nii\n" + "2025-10-30 12:10:08,788 INFO image_writer.py:197 - writing: /home/mqin/src/md-app-sdk/notebooks/tutorials/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626.nii\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "[2025-09-30 18:29:41,040] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Input of shape: torch.Size([1, 1, 270, 270, 106])\n", - "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:226: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:306.)\n", + "[2025-10-30 12:10:10,438] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Input of shape: torch.Size([1, 1, 270, 270, 106])\n", + "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:226: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:345.)\n", " win_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n", - "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:370: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:306.)\n", + "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:370: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. 
In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:345.)\n", " out[idx_zm] += p\n" ] }, @@ -833,34 +837,34 @@ "name": "stdout", "output_type": "stream", "text": [ - "2025-09-30 18:29:42,211 INFO image_writer.py:197 - writing: /home/mqin/src/md-app-sdk/notebooks/tutorials/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626_seg.nii\n" + "2025-10-30 12:10:11,443 INFO image_writer.py:197 - writing: /home/mqin/src/md-app-sdk/notebooks/tutorials/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626_seg.nii\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "[2025-09-30 18:29:43,678] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform length/batch size of output: 1\n", - "[2025-09-30 18:29:43,688] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pixel spacings for pred: tensor([0.7891, 0.7891, 1.5000], dtype=torch.float64)\n", - "[2025-09-30 18:29:43,822] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pred of shape: (1, 512, 512, 204)\n", - "[2025-09-30 18:29:43,878] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image numpy array of type shape: (204, 512, 512)\n", - "[2025-09-30 18:29:43,885] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image pixel max value: 1\n", - "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/highdicom/base.py:165: UserWarning: The string \"C3N-00198\" is unlikely to represent the intended person name since it contains only a single component. Construct a person name according to the format in described in https://dicom.nema.org/dicom/2013/output/chtml/part05/sect_6.2.html#sect_6.2.1.2, or, in pydicom 2.2.0 or later, use the pydicom.valuerep.PersonName.from_named_components() method to construct the person name correctly. If a single-component name is really intended, add a trailing caret character to disambiguate the name.\n", + "[2025-10-30 12:10:12,901] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform length/batch size of output: 1\n", + "[2025-10-30 12:10:12,904] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pixel spacings for pred: tensor([0.7891, 0.7891, 1.5000], dtype=torch.float64)\n", + "[2025-10-30 12:10:13,033] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pred of shape: (1, 512, 512, 204)\n", + "[2025-10-30 12:10:13,071] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image numpy array of type shape: (204, 512, 512)\n", + "[2025-10-30 12:10:13,077] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image pixel max value: 1\n", + "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/highdicom/base.py:181: UserWarning: The string \"C3N-00198\" is unlikely to represent the intended person name since it contains only a single component. 
Construct a person name according to the format in described in https://dicom.nema.org/dicom/2013/output/chtml/part05/sect_6.2.html#sect_6.2.1.2, or, in pydicom 2.2.0 or later, use the pydicom.valuerep.PersonName.from_named_components() method to construct the person name correctly. If a single-component name is really intended, add a trailing caret character to disambiguate the name.\n", " check_person_name(patient_name)\n", - "[2025-09-30 18:29:45,433] [INFO] (highdicom.base) - copy Image-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", - "[2025-09-30 18:29:45,435] [INFO] (highdicom.base) - copy attributes of module \"Specimen\"\n", - "[2025-09-30 18:29:45,436] [INFO] (highdicom.base) - copy Patient-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", - "[2025-09-30 18:29:45,437] [INFO] (highdicom.base) - copy attributes of module \"Patient\"\n", - "[2025-09-30 18:29:45,438] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Subject\"\n", - "[2025-09-30 18:29:45,439] [INFO] (highdicom.base) - copy Study-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", - "[2025-09-30 18:29:45,440] [INFO] (highdicom.base) - copy attributes of module \"General Study\"\n", - "[2025-09-30 18:29:45,442] [INFO] (highdicom.base) - copy attributes of module \"Patient Study\"\n", - "[2025-09-30 18:29:45,443] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Study\"\n", + "[2025-10-30 12:10:14,403] [INFO] (highdicom.base) - copy Image-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 12:10:14,405] [INFO] (highdicom.base) - copy attributes of module \"Specimen\"\n", + "[2025-10-30 12:10:14,405] [INFO] (highdicom.base) - copy Patient-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 12:10:14,406] [INFO] (highdicom.base) - copy attributes of module \"Patient\"\n", + "[2025-10-30 12:10:14,407] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Subject\"\n", + "[2025-10-30 12:10:14,408] [INFO] (highdicom.base) - copy Study-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 12:10:14,408] [INFO] (highdicom.base) - copy attributes of module \"General Study\"\n", + "[2025-10-30 12:10:14,409] [INFO] (highdicom.base) - copy attributes of module \"Patient Study\"\n", + "[2025-10-30 12:10:14,410] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Study\"\n", "[info] [greedy_scheduler.cpp:372] Scheduler stopped: Some entities are waiting for execution, but there are no periodic or async entities to get out of the deadlock.\n", "[info] [greedy_scheduler.cpp:401] Scheduler finished.\n", "[info] [gxf_executor.cpp:2588] Deactivating Graph...\n", "[info] [gxf_executor.cpp:2597] Graph execution finished.\n", - "[2025-09-30 18:29:45,557] [INFO] (__main__.AISpleenSegApp) - End run\n" + "[2025-10-30 12:10:14,509] [INFO] (__main__.AISpleenSegApp) - End run\n" ] } ], @@ -1039,7 +1043,7 @@ " resample=False,\n", " output_ext=\".nii\",\n", " ),\n", - " Orientationd(keys=my_key, axcodes=\"LPS\"),\n", + " Orientationd(keys=my_key, axcodes=\"RAS\"),\n", " Spacingd(keys=my_key, pixdim=[1.5, 1.5, 2.9], mode=[\"bilinear\"]),\n", " ScaleIntensityRanged(keys=my_key, a_min=-57, a_max=164, b_min=0.0, b_max=1.0, 
clip=True),\n", " EnsureTyped(keys=my_key),\n", @@ -1302,92 +1306,96 @@ "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/deploy/utils/importutil.py:20: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n", " import pkg_resources\n", "[\u001b[32minfo\u001b[m] [fragment.cpp:969] Loading extensions from configs...\n", - "[2025-09-30 18:29:50,368] [INFO] (root) - Parsed args: Namespace(log_level=None, input=None, output=None, model=None, workdir=None, triton_server_netloc=None, argv=['my_app'])\n", - "[2025-09-30 18:29:50,372] [INFO] (root) - AppContext object: AppContext(input_path=dcm, output_path=output, model_path=models, workdir=), triton_server_netloc=\n", - "[2025-09-30 18:29:50,372] [INFO] (app.AISpleenSegApp) - App input and output path: dcm, output\n", + "[2025-10-30 12:10:18,978] [INFO] (root) - Parsed args: Namespace(log_level=None, input=None, output=None, model=None, workdir=None, triton_server_netloc=None, argv=['my_app'])\n", + "[2025-10-30 12:10:18,980] [INFO] (root) - AppContext object: AppContext(input_path=dcm, output_path=output, model_path=models, workdir=), triton_server_netloc=\n", + "[2025-10-30 12:10:18,980] [INFO] (app.AISpleenSegApp) - App input and output path: dcm, output\n", "[\u001b[32minfo\u001b[m] [gxf_executor.cpp:344] Creating context\n", "[\u001b[32minfo\u001b[m] [gxf_executor.cpp:2508] Activating Graph...\n", "[\u001b[32minfo\u001b[m] [gxf_executor.cpp:2579] Running Graph...\n", "[\u001b[32minfo\u001b[m] [gxf_executor.cpp:2581] Waiting for completion...\n", "[\u001b[32minfo\u001b[m] [greedy_scheduler.cpp:191] Scheduling 5 entities\n", - "[2025-09-30 18:29:50,498] [INFO] (monai.deploy.operators.dicom_data_loader_operator.DICOMDataLoaderOperator) - No or invalid input path from the optional input port: None\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Finding series for Selection named: CT Series\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Searching study, : 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291\n", + "[2025-10-30 12:10:19,108] [INFO] (monai.deploy.operators.dicom_data_loader_operator.DICOMDataLoaderOperator) - No or invalid input path from the optional input port: None\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - Finding series for Selection named: CT Series\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - Searching study, : 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291\n", " # of series: 1\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Working on series, instance UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - On attribute: 'StudyDescription' to match value: '(.*?)'\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Series attribute StudyDescription value: CT ABDOMEN W IV CONTRAST\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - On attribute: 'Modality' to match value: '(?i)CT'\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Series attribute Modality value: CT\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - On attribute: 'SeriesDescription' to match value: '(.*?)'\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Series attribute SeriesDescription value: ABD/PANC 3.0 B31f\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - On attribute: 'ImageType' to match value: ['PRIMARY', 'ORIGINAL']\n", - "[2025-09-30 18:29:50,911] 
[INFO] (root) - Series attribute ImageType value: None\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Instance level attribute ImageType value: [\"['ORIGINAL', 'PRIMARY', 'AXIAL', 'CT_SOM5 SPI']\"]\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Selected Series, UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Series Selection finalized\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Series Description of selected DICOM Series for inference: ABD/PANC 3.0 B31f\n", - "[2025-09-30 18:29:50,911] [INFO] (root) - Series Instance UID of selected DICOM Series for inference: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - Working on series, instance UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - On attribute: 'StudyDescription' to match value: '(.*?)'\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - Series attribute StudyDescription value: CT ABDOMEN W IV CONTRAST\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - On attribute: 'Modality' to match value: '(?i)CT'\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - Series attribute Modality value: CT\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - On attribute: 'SeriesDescription' to match value: '(.*?)'\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - Series attribute SeriesDescription value: ABD/PANC 3.0 B31f\n", + "[2025-10-30 12:10:19,429] [INFO] (root) - On attribute: 'ImageType' to match value: ['PRIMARY', 'ORIGINAL']\n", + "[2025-10-30 12:10:19,430] [INFO] (root) - Series attribute ImageType value: None\n", + "[2025-10-30 12:10:19,430] [INFO] (root) - Instance level attribute ImageType value: [\"['ORIGINAL', 'PRIMARY', 'AXIAL', 'CT_SOM5 SPI']\"]\n", + "[2025-10-30 12:10:19,430] [INFO] (root) - Selected Series, UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 12:10:19,430] [INFO] (root) - Series Selection finalized\n", + "[2025-10-30 12:10:19,430] [INFO] (root) - Series Description of selected DICOM Series for inference: ABD/PANC 3.0 B31f\n", + "[2025-10-30 12:10:19,430] [INFO] (root) - Series Instance UID of selected DICOM Series for inference: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/utils/deprecate_utils.py:321: FutureWarning: monai.transforms.spatial.dictionary Orientationd.__init__:labels: Current default value of argument `labels=(('L', 'R'), ('P', 'A'), ('I', 'S'))` was changed in version None from `labels=(('L', 'R'), ('P', 'A'), ('I', 'S'))` to `labels=None`. 
Default value changed to None meaning that the transform now uses the 'space' of a meta-tensor, if applicable, to determine appropriate axis labels.\n",
      "  warn_deprecated(argname, msg, warning_category)\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Converted Image object metadata:\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDate: 20090831, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesTime: 101721.452, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Modality: CT, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDescription: ABD/PANC 3.0 B31f, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - PatientPosition: HFS, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesNumber: 8, type <class 'int'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_pixel_spacing: 0.7890625, type <class 'float'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_pixel_spacing: 0.7890625, type <class 'float'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_pixel_spacing: 1.5, type <class 'float'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_direction_cosine: [1.0, 0.0, 0.0], type <class 'list'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_direction_cosine: [0.0, 1.0, 0.0], type <class 'list'>\n",
+     "[2025-10-30 12:10:19,992] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_direction_cosine: [0.0, 0.0, 1.0], type <class 'list'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - dicom_affine_transform: [[ 0.7890625 0. 0. -197.60547 ]\n",
      "  [ 0. 0.7890625 0. -398.60547 ]\n",
      "  [ 0. 0. 1.5 -383. ]\n",
      "  [ 0. 0. 0. 1. ]], type <class 'numpy.ndarray'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - nifti_affine_transform: [[ -0.7890625 -0. -0. 197.60547 ]\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - nifti_affine_transform: [[ -0.7890625 -0. -0. 197.60547 ]\n",
      "  [ -0. -0.7890625 -0. 398.60547 ]\n",
      "  [ 0. 0. 1.5 -383. ]\n",
      "  [ 0. 0. 0. 1. ]], type <class 'numpy.ndarray'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyID: , type <class 'str'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDate: 20090831, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyTime: 095948.599, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDescription: CT ABDOMEN W IV CONTRAST, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - AccessionNumber: 5471978513296937, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - selection_name: CT Series, type <class 'str'>\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - affine: [[ -0.7890625 -0. -0. 197.60547 ]\n",
      "  [ -0. -0.7890625 -0. 398.60547 ]\n",
      "  [ 0. 0. 1.5 -383. ]\n",
      "  [ 0. 0. 0. 1. ]], type <class 'numpy.ndarray'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291, type <class 'str'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyID: , type <class 'str'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDate: 20090831, type <class 'str'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyTime: 095948.599, type <class 'str'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDescription: CT ABDOMEN W IV CONTRAST, type <class 'str'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - AccessionNumber: 5471978513296937, type <class 'str'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - selection_name: CT Series, type <class 'str'>\n",
-     "[2025-09-30 18:29:51,294] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - space: LPS, type <class 'str'>\n",
-     "2025-09-30 18:29:51,956 INFO image_writer.py:197 - writing: /home/mqin/src/md-app-sdk/notebooks/tutorials/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626.nii\n",
-     "[2025-09-30 18:29:53,772] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Input of shape: torch.Size([1, 1, 270, 270, 106])\n",
-     "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:226: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:306.)\n",
+     "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - space: RAS, type <class 'str'>\n",
+     "2025-10-30 12:10:20,794 INFO image_writer.py:197 - writing: /home/mqin/src/md-app-sdk/notebooks/tutorials/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626.nii\n",
+     "[2025-10-30 12:10:22,562] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Input of shape: torch.Size([1, 1, 270, 270, 106])\n",
+     "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:226: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. 
In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:306.)\n", + "[2025-10-30 12:10:19,993] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - space: RAS, type \n", + "2025-10-30 12:10:20,794 INFO image_writer.py:197 - writing: /home/mqin/src/md-app-sdk/notebooks/tutorials/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626.nii\n", + "[2025-10-30 12:10:22,562] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Input of shape: torch.Size([1, 1, 270, 270, 106])\n", + "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:226: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:345.)\n", " win_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n", - "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:370: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:306.)\n", + "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/monai/inferers/utils.py:370: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. 
In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:345.)\n",
      "  out[idx_zm] += p\n",
+     "2025-10-30 12:10:23,584 INFO image_writer.py:197 - writing: /home/mqin/src/md-app-sdk/notebooks/tutorials/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626_seg.nii\n",
+     "[2025-10-30 12:10:25,013] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform length/batch size of output: 1\n",
+     "[2025-10-30 12:10:25,014] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pixel spacings for pred: tensor([0.7891, 0.7891, 1.5000], dtype=torch.float64)\n",
+     "[2025-10-30 12:10:25,139] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pred of shape: (1, 512, 512, 204)\n",
+     "[2025-10-30 12:10:25,175] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image numpy array of type <class 'numpy.ndarray'> shape: (204, 512, 512)\n",
+     "[2025-10-30 12:10:25,180] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image pixel max value: 1\n",
+     "/home/mqin/src/md-app-sdk/.venv/lib/python3.10/site-packages/highdicom/base.py:181: UserWarning: The string \"C3N-00198\" is unlikely to represent the intended person name since it contains only a single component. 
Construct a person name according to the format in described in https://dicom.nema.org/dicom/2013/output/chtml/part05/sect_6.2.html#sect_6.2.1.2, or, in pydicom 2.2.0 or later, use the pydicom.valuerep.PersonName.from_named_components() method to construct the person name correctly. If a single-component name is really intended, add a trailing caret character to disambiguate the name.\n", " check_person_name(patient_name)\n", - "[2025-09-30 18:29:58,268] [INFO] (highdicom.base) - copy Image-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", - "[2025-09-30 18:29:58,269] [INFO] (highdicom.base) - copy attributes of module \"Specimen\"\n", - "[2025-09-30 18:29:58,269] [INFO] (highdicom.base) - copy Patient-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", - "[2025-09-30 18:29:58,269] [INFO] (highdicom.base) - copy attributes of module \"Patient\"\n", - "[2025-09-30 18:29:58,269] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Subject\"\n", - "[2025-09-30 18:29:58,269] [INFO] (highdicom.base) - copy Study-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", - "[2025-09-30 18:29:58,270] [INFO] (highdicom.base) - copy attributes of module \"General Study\"\n", - "[2025-09-30 18:29:58,270] [INFO] (highdicom.base) - copy attributes of module \"Patient Study\"\n", - "[2025-09-30 18:29:58,270] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Study\"\n", + "[2025-10-30 12:10:26,311] [INFO] (highdicom.base) - copy Image-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 12:10:26,311] [INFO] (highdicom.base) - copy attributes of module \"Specimen\"\n", + "[2025-10-30 12:10:26,311] [INFO] (highdicom.base) - copy Patient-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 12:10:26,311] [INFO] (highdicom.base) - copy attributes of module \"Patient\"\n", + "[2025-10-30 12:10:26,312] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Subject\"\n", + "[2025-10-30 12:10:26,312] [INFO] (highdicom.base) - copy Study-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 12:10:26,312] [INFO] (highdicom.base) - copy attributes of module \"General Study\"\n", + "[2025-10-30 12:10:26,312] [INFO] (highdicom.base) - copy attributes of module \"Patient Study\"\n", + "[2025-10-30 12:10:26,313] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Study\"\n", "[\u001b[32minfo\u001b[m] [greedy_scheduler.cpp:372] Scheduler stopped: Some entities are waiting for execution, but there are no periodic or async entities to get out of the deadlock.\n", "[\u001b[32minfo\u001b[m] [greedy_scheduler.cpp:401] Scheduler finished.\n", "[\u001b[32minfo\u001b[m] [gxf_executor.cpp:2588] Deactivating Graph...\n", "[\u001b[32minfo\u001b[m] [gxf_executor.cpp:2597] Graph execution finished.\n", - "[2025-09-30 18:29:58,398] [INFO] (app.AISpleenSegApp) - End run\n", + "[2025-10-30 12:10:26,411] [INFO] (app.AISpleenSegApp) - End run\n", "[\u001b[32minfo\u001b[m] [gxf_executor.cpp:379] Destroying context\n" ] } @@ -1407,7 +1415,7 @@ "output_type": "stream", "text": [ "output:\n", - "1.2.826.0.1.3680043.10.511.3.3214240315526252042636124072020054.dcm\n", + 
"1.2.826.0.1.3680043.10.511.3.83688787597818986137093182993481694.dcm\n", "saved_images_folder\n", "\n", "output/saved_images_folder:\n", @@ -1514,24 +1522,1264 @@ "name": "stdout", "output_type": "stream", "text": [ - "usage: monai-deploy package [-h] [-l {DEBUG,INFO,WARN,ERROR,CRITICAL}]\n", - " [--add ADDITIONAL_LIBS] --config CONFIG\n", - " [--docs DOCS] [--models MODELS] --platform\n", - " PLATFORM [--timeout TIMEOUT] [--version VERSION]\n", - " [--add-host ADD_HOSTS] [--base-image BASE_IMAGE]\n", - " [--build-image BUILD_IMAGE]\n", - " [--build-cache BUILD_CACHE]\n", - " [--cmake-args CMAKE_ARGS]\n", - " [--holoscan-sdk-file HOLOSCAN_SDK_FILE]\n", - " [--includes [{debug,holoviz,torch,onnx} ...]]\n", - " [--input-data INPUT_DATA]\n", - " [--monai-deploy-sdk-file MONAI_DEPLOY_SDK_FILE]\n", - " [--no-cache] [--sdk SDK]\n", - " [--sdk-version SDK_VERSION] [--source SOURCE]\n", - " [--output OUTPUT] --tag TAG [--username USERNAME]\n", - " [--uid UID] [--gid GID]\n", - " application\n", - "monai-deploy package: error: argument -l/--log-level: invalid choice: 'MONAI-DEPLOY-SDK-FILE' (choose from 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')\n" + "[2025-10-30 12:10:28,398] [INFO] (common) - Downloading CLI manifest file from https://raw.githubusercontent.com/nvidia-holoscan/holoscan-cli/refs/heads/main/releases/3.7.0/artifacts.json...\n", + "[2025-10-30 12:10:28,616] [DEBUG] (common) - Validating CLI manifest file...\n", + "[2025-10-30 12:10:28,617] [INFO] (packager.parameters) - Application: /home/mqin/src/md-app-sdk/notebooks/tutorials/my_app\n", + "[2025-10-30 12:10:28,618] [INFO] (packager.parameters) - Detected application type: Python Module\n", + "[2025-10-30 12:10:28,618] [INFO] (packager) - Scanning for models in /home/mqin/src/md-app-sdk/notebooks/tutorials/models...\n", + "[2025-10-30 12:10:28,619] [DEBUG] (packager) - Model model=/home/mqin/src/md-app-sdk/notebooks/tutorials/models/model added.\n", + "[2025-10-30 12:10:28,619] [INFO] (packager) - Reading application configuration from /home/mqin/src/md-app-sdk/notebooks/tutorials/my_app/app.yaml...\n", + "[2025-10-30 12:10:28,622] [INFO] (packager) - Generating app.json...\n", + "[2025-10-30 12:10:28,622] [INFO] (packager) - Generating pkg.json...\n", + "[2025-10-30 12:10:28,690] [DEBUG] (common) - \n", + "=============== Begin app.json ===============\n", + "{\n", + " \"apiVersion\": \"1.0.0\",\n", + " \"command\": \"[\\\"python3\\\", \\\"/opt/holoscan/app\\\"]\",\n", + " \"environment\": {\n", + " \"HOLOSCAN_APPLICATION\": \"/opt/holoscan/app\",\n", + " \"HOLOSCAN_INPUT_PATH\": \"input/\",\n", + " \"HOLOSCAN_OUTPUT_PATH\": \"output/\",\n", + " \"HOLOSCAN_WORKDIR\": \"/var/holoscan\",\n", + " \"HOLOSCAN_MODEL_PATH\": \"/opt/holoscan/models\",\n", + " \"HOLOSCAN_CONFIG_PATH\": \"/var/holoscan/app.yaml\",\n", + " \"HOLOSCAN_APP_MANIFEST_PATH\": \"/etc/holoscan/app.json\",\n", + " \"HOLOSCAN_PKG_MANIFEST_PATH\": \"/etc/holoscan/pkg.json\",\n", + " \"HOLOSCAN_DOCS_PATH\": \"/opt/holoscan/docs\",\n", + " \"HOLOSCAN_LOGS_PATH\": \"/var/holoscan/logs\"\n", + " },\n", + " \"input\": {\n", + " \"path\": \"input/\",\n", + " \"formats\": null\n", + " },\n", + " \"liveness\": null,\n", + " \"output\": {\n", + " \"path\": \"output/\",\n", + " \"formats\": null\n", + " },\n", + " \"readiness\": null,\n", + " \"sdk\": \"monai-deploy\",\n", + " \"sdkVersion\": \"1.0.0\",\n", + " \"timeout\": 0,\n", + " \"version\": 1.0,\n", + " \"workingDirectory\": \"/var/holoscan\"\n", + "}\n", + "================ End app.json ================\n", + " \n", + 
"[2025-10-30 12:10:28,691] [DEBUG] (common) - \n", + "=============== Begin pkg.json ===============\n", + "{\n", + " \"apiVersion\": \"1.0.0\",\n", + " \"applicationRoot\": \"/opt/holoscan/app\",\n", + " \"modelRoot\": \"/opt/holoscan/models\",\n", + " \"models\": {\n", + " \"model\": \"/opt/holoscan/models/model\"\n", + " },\n", + " \"resources\": {\n", + " \"cpu\": 1,\n", + " \"gpu\": 1,\n", + " \"memory\": \"1Gi\",\n", + " \"gpuMemory\": \"6Gi\"\n", + " },\n", + " \"version\": 1.0,\n", + " \"platformConfig\": \"dgpu\"\n", + "}\n", + "================ End pkg.json ================\n", + " \n", + "[2025-10-30 12:10:28,706] [DEBUG] (packager.builder) - ================ Begin requirements.txt ================\n", + "[2025-10-30 12:10:28,706] [DEBUG] (packager.builder) - highdicom>=0.18.2\n", + "[2025-10-30 12:10:28,706] [DEBUG] (packager.builder) - monai>=1.0\n", + "[2025-10-30 12:10:28,706] [DEBUG] (packager.builder) - nibabel>=3.2.1\n", + "[2025-10-30 12:10:28,706] [DEBUG] (packager.builder) - numpy>=1.21.6\n", + "[2025-10-30 12:10:28,706] [DEBUG] (packager.builder) - pydicom>=2.3.0\n", + "[2025-10-30 12:10:28,706] [DEBUG] (packager.builder) - setuptools>=59.5.0 # for pkg_resources\n", + "[2025-10-30 12:10:28,707] [DEBUG] (packager.builder) - SimpleITK>=2.0.0\n", + "[2025-10-30 12:10:28,707] [DEBUG] (packager.builder) - torch>=1.12.0\n", + "[2025-10-30 12:10:28,707] [DEBUG] (packager.builder) - \n", + "[2025-10-30 12:10:28,707] [DEBUG] (packager.builder) - ================ End requirements.txt ==================\n", + "[2025-10-30 12:10:28,707] [DEBUG] (packager.builder) - \n", + "========== Begin Build Parameters ==========\n", + "{'add_hosts': None,\n", + " 'additional_lib_paths': '',\n", + " 'app_config_file_path': PosixPath('/home/mqin/src/md-app-sdk/notebooks/tutorials/my_app/app.yaml'),\n", + " 'app_dir': PosixPath('/opt/holoscan/app'),\n", + " 'app_json': '/etc/holoscan/app.json',\n", + " 'application': PosixPath('/home/mqin/src/md-app-sdk/notebooks/tutorials/my_app'),\n", + " 'application_directory': PosixPath('/home/mqin/src/md-app-sdk/notebooks/tutorials/my_app'),\n", + " 'application_type': 'PythonModule',\n", + " 'build_cache': PosixPath('/home/mqin/.holoscan_build_cache'),\n", + " 'cmake_args': '',\n", + " 'command': '[\"python3\", \"/opt/holoscan/app\"]',\n", + " 'command_filename': 'my_app',\n", + " 'config_file_path': PosixPath('/var/holoscan/app.yaml'),\n", + " 'docs_dir': PosixPath('/opt/holoscan/docs'),\n", + " 'full_input_path': PosixPath('/var/holoscan/input'),\n", + " 'full_output_path': PosixPath('/var/holoscan/output'),\n", + " 'gid': 1000,\n", + " 'holoscan_sdk_version': '3.7.0',\n", + " 'includes': [],\n", + " 'input_data': None,\n", + " 'input_dir': 'input/',\n", + " 'lib_dir': PosixPath('/opt/holoscan/lib'),\n", + " 'logs_dir': PosixPath('/var/holoscan/logs'),\n", + " 'models': {'model': PosixPath('/home/mqin/src/md-app-sdk/notebooks/tutorials/models/model')},\n", + " 'models_dir': PosixPath('/opt/holoscan/models'),\n", + " 'monai_deploy_app_sdk_version': '1.0.0',\n", + " 'no_cache': False,\n", + " 'output_dir': 'output/',\n", + " 'pip_packages': None,\n", + " 'pkg_json': '/etc/holoscan/pkg.json',\n", + " 'requirements_file_path': PosixPath('/home/mqin/src/md-app-sdk/notebooks/tutorials/my_app/requirements.txt'),\n", + " 'sdk': ,\n", + " 'sdk_type': 'monai-deploy',\n", + " 'tarball_output': None,\n", + " 'timeout': 0,\n", + " 'title': 'MONAI Deploy App Package - MONAI Bundle AI App',\n", + " 'uid': 1000,\n", + " 'username': 'holoscan',\n", + " 'version': 
1.0,\n", + " 'working_dir': PosixPath('/var/holoscan')}\n", + "=========== End Build Parameters ===========\n", + "\n", + "[2025-10-30 12:10:28,707] [DEBUG] (packager.builder) - \n", + "========== Begin Platform Parameters ==========\n", + "{'base_image': 'nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04',\n", + " 'build_image': None,\n", + " 'cuda_deb_arch': 'x86_64',\n", + " 'cuda_version': 13,\n", + " 'custom_base_image': False,\n", + " 'custom_holoscan_sdk': False,\n", + " 'custom_monai_deploy_sdk': True,\n", + " 'gpu_type': 'dgpu',\n", + " 'holoscan_deb_arch': 'amd64',\n", + " 'holoscan_sdk_file': '3.7.0',\n", + " 'holoscan_sdk_filename': '3.7.0',\n", + " 'monai_deploy_sdk_file': PosixPath('/home/mqin/src/md-app-sdk/dist/monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl'),\n", + " 'monai_deploy_sdk_filename': 'monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl',\n", + " 'tag': 'my_app:1.0',\n", + " 'target_arch': 'x86_64'}\n", + "=========== End Platform Parameters ===========\n", + "\n", + "[2025-10-30 12:10:28,724] [DEBUG] (packager.builder) - \n", + "========== Begin Dockerfile ==========\n", + "# SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n", + "# SPDX-License-Identifier: Apache-2.0\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# http://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License.\n", + "\n", + "ARG GPU_TYPE=dgpu\n", + "ARG LIBTORCH_VERSION_ARM64=\"2.9.0.dev20250828+cu130\"\n", + "ARG LIBTORCH_VERSION_AMD64=\"2.9.0.dev20250829+cu130\"\n", + "ARG LIBTORCH_VISION_VERSION=\"0.24.0.dev20250829\"\n", + "\n", + "\n", + "\n", + "\n", + "FROM nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04 AS base\n", + "\n", + "RUN apt-get update \\\n", + " && apt-get install -y --no-install-recommends --no-install-suggests \\\n", + " curl \\\n", + " jq \\\n", + " && rm -rf /var/lib/apt/lists/*\n", + "\n", + "\n", + "FROM base AS release\n", + "ENV DEBIAN_FRONTEND=noninteractive\n", + "ENV TERM=xterm-256color\n", + "\n", + "ARG GPU_TYPE\n", + "ARG UNAME\n", + "ARG UID\n", + "ARG GID\n", + "ARG LIBTORCH_VERSION_ARM64\n", + "ARG LIBTORCH_VERSION_AMD64\n", + "ARG LIBTORCH_VISION_VERSION\n", + "\n", + "RUN mkdir -p /etc/holoscan/ \\\n", + " && mkdir -p /opt/holoscan/ \\\n", + " && mkdir -p /var/holoscan \\\n", + " && mkdir -p /opt/holoscan/app \\\n", + " && mkdir -p /var/holoscan/input \\\n", + " && mkdir -p /var/holoscan/output\n", + "\n", + "LABEL base=\"nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04\"\n", + "LABEL tag=\"my_app:1.0\"\n", + "LABEL org.opencontainers.image.title=\"MONAI Deploy App Package - MONAI Bundle AI App\"\n", + "LABEL org.opencontainers.image.version=\"1.0\"\n", + "LABEL org.nvidia.holoscan=\"3.7.0\"\n", + "\n", + "LABEL org.monai.deploy.app-sdk=\"1.0.0\"\n", + "\n", + "ENV HOLOSCAN_INPUT_PATH=/var/holoscan/input\n", + "ENV HOLOSCAN_OUTPUT_PATH=/var/holoscan/output\n", + "ENV HOLOSCAN_WORKDIR=/var/holoscan\n", + "ENV HOLOSCAN_APPLICATION=/opt/holoscan/app\n", + "ENV 
HOLOSCAN_TIMEOUT=0\n", + "ENV HOLOSCAN_MODEL_PATH=/opt/holoscan/models\n", + "ENV HOLOSCAN_DOCS_PATH=/opt/holoscan/docs\n", + "ENV HOLOSCAN_CONFIG_PATH=/var/holoscan/app.yaml\n", + "ENV HOLOSCAN_APP_MANIFEST_PATH=/etc/holoscan/app.json\n", + "ENV HOLOSCAN_PKG_MANIFEST_PATH=/etc/holoscan/pkg.json\n", + "ENV HOLOSCAN_LOGS_PATH=/var/holoscan/logs\n", + "ENV HOLOSCAN_VERSION=3.7.0\n", + "\n", + "# Update NV GPG repo key\n", + "# https://developer.nvidia.com/blog/updating-the-cuda-linux-gpg-repository-key/\n", + "RUN rm -f /etc/apt/sources.list.d/cuda*.list \\\n", + " && curl -OL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64/cuda-keyring_1.1-1_all.deb \\\n", + " && dpkg -i cuda-keyring_1.1-1_all.deb \\\n", + " && rm -f cuda-keyring_1.1-1_all.deb \\\n", + " && apt-get update\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "# If torch is installed, we can skip installing Python\n", + "ENV PYTHON_VERSION=3.12.3-*\n", + "ENV PYTHON_PIP_VERSION=24.0+dfsg-*\n", + "\n", + "RUN apt update \\\n", + " && apt-get install -y --no-install-recommends --no-install-suggests \\\n", + " python3-minimal=${PYTHON_VERSION} \\\n", + " libpython3-stdlib=${PYTHON_VERSION} \\\n", + " python3=${PYTHON_VERSION} \\\n", + " python3-venv=${PYTHON_VERSION} \\\n", + " python3-pip=${PYTHON_PIP_VERSION} \\\n", + " && rm -rf /var/lib/apt/lists/*\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "RUN if id \"ubuntu\" >/dev/null 2>&1; then touch /var/mail/ubuntu && chown ubuntu /var/mail/ubuntu && userdel -r ubuntu; fi\n", + "RUN groupadd -f -g $GID $UNAME\n", + "RUN useradd -rm -d /home/$UNAME -s /bin/bash -g $GID -G sudo -u $UID $UNAME\n", + "RUN chown -R holoscan /var/holoscan && \\\n", + " chown -R holoscan /var/holoscan/input && \\\n", + " chown -R holoscan /var/holoscan/output\n", + "\n", + "# Set the working directory\n", + "WORKDIR /var/holoscan\n", + "\n", + "# Copy HAP/MAP tool script\n", + "COPY ./tools /var/holoscan/tools\n", + "RUN chmod +x /var/holoscan/tools\n", + "\n", + "# Remove EXTERNALLY-MANAGED directory\n", + "RUN rm -rf /usr/lib/python3.12/EXTERNALLY-MANAGED\n", + "\n", + "# Set the working directory\n", + "WORKDIR /var/holoscan\n", + "\n", + "USER $UNAME\n", + "\n", + "ENV PATH=/home/${UNAME}/.local/bin:/opt/nvidia/holoscan/bin:$PATH\n", + "ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/${UNAME}/.local/lib/python3.12/site-packages/holoscan/lib\n", + "\n", + "COPY ./pip/requirements.txt /tmp/requirements.txt\n", + "\n", + "RUN pip install --upgrade pip\n", + "RUN pip install --no-cache-dir --user -r /tmp/requirements.txt\n", + "\n", + "\n", + "# Install MONAI Deploy App SDK\n", + "# Copy user-specified MONAI Deploy SDK file\n", + "COPY ./monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl /tmp/monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl\n", + "RUN pip install /tmp/monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl\n", + "\n", + "COPY ./models /opt/holoscan/models\n", + "\n", + "\n", + "COPY ./map/app.json /etc/holoscan/app.json\n", + "COPY ./app.config /var/holoscan/app.yaml\n", + "COPY ./map/pkg.json /etc/holoscan/pkg.json\n", + "\n", + "COPY ./app /opt/holoscan/app\n", + "\n", + "\n", + "\n", + "ENTRYPOINT [\"/var/holoscan/tools\"]\n", + "=========== End Dockerfile ===========\n", + "\n", + "[2025-10-30 12:10:29,052] [INFO] (common) - Using existing Docker BuildKit builder `holoscan_app_builder`\n", + "[2025-10-30 12:10:29,052] [DEBUG] (packager.builder) - Building Holoscan Application Package: 
tag=my_app-x64-workstation-dgpu-linux-amd64:1.0\n", + "[2025-10-30 12:10:29,053] [INFO] (packager.builder) - \n", + "===============================================================================\n", + "Building image for: x64-workstation\n", + " Architecture: linux/amd64\n", + " Base Image: nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04\n", + " Build Image: N/A\n", + " CUDA Version: 13\n", + " Cache: Enabled\n", + " Configuration: dgpu\n", + " Holoscan SDK Package: 3.7.0\n", + " MONAI Deploy App SDK Package: /home/mqin/src/md-app-sdk/dist/monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl\n", + " gRPC Health Probe: N/A\n", + " SDK Version: 3.7.0\n", + " SDK: monai-deploy\n", + " Tag: my_app-x64-workstation-dgpu-linux-amd64:1.0\n", + " Included features/dependencies: N/A\n", + " \n", + "#0 building with \"holoscan_app_builder\" instance using docker-container driver\n", + "\n", + "#1 [internal] load build definition from Dockerfile\n", + "#1 transferring dockerfile: 4.70kB done\n", + "#1 DONE 0.1s\n", + "\n", + "#2 [auth] nvidia/cuda:pull token for nvcr.io\n", + "#2 DONE 0.0s\n", + "\n", + "#3 [internal] load metadata for nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04\n", + "#3 DONE 0.8s\n", + "\n", + "#4 [internal] load .dockerignore\n", + "#4 transferring context: 1.80kB done\n", + "#4 DONE 0.1s\n", + "\n", + "#5 importing cache manifest from nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04\n", + "#5 ...\n", + "\n", + "#6 [internal] load build context\n", + "#6 DONE 0.0s\n", + "\n", + "#7 importing cache manifest from local:10832056283413565963\n", + "#7 inferred cache manifest type: application/vnd.oci.image.index.v1+json done\n", + "#7 DONE 0.0s\n", + "\n", + "#8 [base 1/2] FROM nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04@sha256:95318efecfd68ab3d109da5277863257b06137c84f34a87f38de970d5cd035d3\n", + "#8 resolve nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04@sha256:95318efecfd68ab3d109da5277863257b06137c84f34a87f38de970d5cd035d3 0.0s done\n", + "#8 DONE 0.1s\n", + "\n", + "#5 importing cache manifest from nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04\n", + "#5 inferred cache manifest type: application/vnd.docker.distribution.manifest.list.v2+json done\n", + "#5 DONE 0.3s\n", + "\n", + "#6 [internal] load build context\n", + "#6 transferring context: 19.58MB 0.1s done\n", + "#6 DONE 0.3s\n", + "\n", + "#8 [base 1/2] FROM nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04@sha256:95318efecfd68ab3d109da5277863257b06137c84f34a87f38de970d5cd035d3\n", + "#8 DONE 0.5s\n", + "\n", + "#8 [base 1/2] FROM nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04@sha256:95318efecfd68ab3d109da5277863257b06137c84f34a87f38de970d5cd035d3\n", + "#8 sha256:932162d4fcf6e1094ee1544e8fde0ae2a02b2c4e9545f64f373ce3a4479189e6 1.52kB / 1.52kB 0.2s\n", + "#8 sha256:932162d4fcf6e1094ee1544e8fde0ae2a02b2c4e9545f64f373ce3a4479189e6 1.52kB / 1.52kB 0.2s done\n", + "#8 sha256:492db7b3e492442f7a1ad30fea534f61ad89da451c675ccab2488e41034d0886 1.68kB / 1.68kB 0.2s done\n", + "#8 sha256:84fef9f1ca4f21e9c7411db3c57fe91a1f401d7051d87a3bfed97ff70a2cf72c 59.61kB / 59.61kB 0.2s done\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.05MB / 1.51GB 0.2s\n", + "#8 sha256:13e8f87efde86df96bfe73da211eb196d0416702b69d92947ec617138e6db64b 6.88kB / 6.88kB 0.1s done\n", + "#8 sha256:ddc61996788ff6833bbe82138d6fc5000e848953b90df5055cbae21479218914 186B / 186B 0.1s done\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 4.19MB / 105.07MB 0.2s\n", + "#8 
sha256:9c9b39ad83d512d5af47e9c22f4458cb586f05ea478656a372c5e739cb7280e5 1.05MB / 4.55MB 0.2s\n", + "#8 sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 1.05MB / 29.72MB 0.2s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 13.63MB / 105.07MB 0.5s\n", + "#8 sha256:9c9b39ad83d512d5af47e9c22f4458cb586f05ea478656a372c5e739cb7280e5 2.10MB / 4.55MB 0.3s\n", + "#8 sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 5.24MB / 29.72MB 0.3s\n", + "#8 sha256:9c9b39ad83d512d5af47e9c22f4458cb586f05ea478656a372c5e739cb7280e5 4.55MB / 4.55MB 0.4s done\n", + "#8 sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 9.44MB / 29.72MB 0.5s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 23.13MB / 105.07MB 0.8s\n", + "#8 sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 14.68MB / 29.72MB 0.6s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 29.36MB / 105.07MB 0.9s\n", + "#8 sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 22.02MB / 29.72MB 0.8s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 35.65MB / 105.07MB 1.1s\n", + "#8 sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 27.31MB / 29.72MB 0.9s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 44.04MB / 105.07MB 1.2s\n", + "#8 sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 29.72MB / 29.72MB 1.0s done\n", + "#8 extracting sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 51.38MB / 105.07MB 1.4s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 58.72MB / 105.07MB 1.5s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 81.79MB / 1.51GB 1.7s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 66.06MB / 105.07MB 1.7s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 73.40MB / 105.07MB 1.8s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 80.74MB / 105.07MB 2.0s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 88.08MB / 105.07MB 2.1s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 95.42MB / 105.07MB 2.3s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 103.19MB / 105.07MB 2.4s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 165.68MB / 1.51GB 2.9s\n", + "#8 sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 105.07MB / 105.07MB 3.2s done\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 251.66MB / 1.51GB 3.6s\n", + "#8 extracting sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 2.4s done\n", + "#8 extracting sha256:9c9b39ad83d512d5af47e9c22f4458cb586f05ea478656a372c5e739cb7280e5\n", + "#8 extracting sha256:9c9b39ad83d512d5af47e9c22f4458cb586f05ea478656a372c5e739cb7280e5 0.4s done\n", + "#8 extracting sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 338.69MB / 1.51GB 4.4s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 424.67MB / 1.51GB 5.1s\n", + "#8 
sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 511.71MB / 1.51GB 5.9s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 598.33MB / 1.51GB 6.6s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 684.72MB / 1.51GB 7.4s\n", + "#8 extracting sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 3.8s done\n", + "#8 extracting sha256:ddc61996788ff6833bbe82138d6fc5000e848953b90df5055cbae21479218914 0.0s done\n", + "#8 extracting sha256:13e8f87efde86df96bfe73da211eb196d0416702b69d92947ec617138e6db64b 0.0s done\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 771.75MB / 1.51GB 8.1s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 858.78MB / 1.51GB 8.9s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 947.91MB / 1.51GB 9.6s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.02GB / 1.51GB 10.4s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.11GB / 1.51GB 11.3s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.19GB / 1.51GB 12.0s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.27GB / 1.51GB 12.8s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.36GB / 1.51GB 13.7s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.44GB / 1.51GB 14.6s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.51GB / 1.51GB 19.7s\n", + "#8 sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 1.51GB / 1.51GB 21.6s done\n", + "#8 extracting sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338\n", + "#8 extracting sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 24.3s done\n", + "#8 DONE 46.6s\n", + "\n", + "#8 [base 1/2] FROM nvcr.io/nvidia/cuda:13.0.0-runtime-ubuntu24.04@sha256:95318efecfd68ab3d109da5277863257b06137c84f34a87f38de970d5cd035d3\n", + "#8 extracting sha256:84fef9f1ca4f21e9c7411db3c57fe91a1f401d7051d87a3bfed97ff70a2cf72c 0.0s done\n", + "#8 extracting sha256:492db7b3e492442f7a1ad30fea534f61ad89da451c675ccab2488e41034d0886 0.0s done\n", + "#8 extracting sha256:932162d4fcf6e1094ee1544e8fde0ae2a02b2c4e9545f64f373ce3a4479189e6 0.0s done\n", + "#8 DONE 46.7s\n", + "\n", + "#9 [base 2/2] RUN apt-get update && apt-get install -y --no-install-recommends --no-install-suggests curl jq && rm -rf /var/lib/apt/lists/*\n", + "#9 0.381 Get:1 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64 InRelease [1581 B]\n", + "#9 0.567 Get:2 http://archive.ubuntu.com/ubuntu noble InRelease [256 kB]\n", + "#9 0.574 Get:3 http://security.ubuntu.com/ubuntu noble-security InRelease [126 kB]\n", + "#9 0.906 Get:4 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64 Packages [966 kB]\n", + "#9 1.302 Get:5 http://archive.ubuntu.com/ubuntu noble-updates InRelease [126 kB]\n", + "#9 1.336 Get:6 http://security.ubuntu.com/ubuntu noble-security/universe amd64 Packages [1170 kB]\n", + "#9 1.484 Get:7 http://archive.ubuntu.com/ubuntu noble-backports InRelease [126 kB]\n", + "#9 1.674 Get:8 http://archive.ubuntu.com/ubuntu noble/multiverse amd64 Packages [331 kB]\n", + "#9 1.802 Get:9 http://archive.ubuntu.com/ubuntu noble/restricted amd64 Packages [117 kB]\n", + "#9 1.850 Get:10 http://archive.ubuntu.com/ubuntu 
noble/universe amd64 Packages [19.3 MB]\n", + "#9 1.985 Get:11 http://security.ubuntu.com/ubuntu noble-security/main amd64 Packages [1599 kB]\n", + "#9 2.148 Get:12 http://security.ubuntu.com/ubuntu noble-security/multiverse amd64 Packages [33.1 kB]\n", + "#9 2.150 Get:13 http://security.ubuntu.com/ubuntu noble-security/restricted amd64 Packages [2638 kB]\n", + "#9 3.407 Get:14 http://archive.ubuntu.com/ubuntu noble/main amd64 Packages [1808 kB]\n", + "#9 3.458 Get:15 http://archive.ubuntu.com/ubuntu noble-updates/restricted amd64 Packages [2813 kB]\n", + "#9 3.595 Get:16 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 Packages [1988 kB]\n", + "#9 3.680 Get:17 http://archive.ubuntu.com/ubuntu noble-updates/universe amd64 Packages [1939 kB]\n", + "#9 3.760 Get:18 http://archive.ubuntu.com/ubuntu noble-updates/multiverse amd64 Packages [35.9 kB]\n", + "#9 3.762 Get:19 http://archive.ubuntu.com/ubuntu noble-backports/main amd64 Packages [49.4 kB]\n", + "#9 3.763 Get:20 http://archive.ubuntu.com/ubuntu noble-backports/universe amd64 Packages [33.9 kB]\n", + "#9 4.526 Fetched 35.5 MB in 4s (8373 kB/s)\n", + "#9 4.526 Reading package lists...\n", + "#9 5.474 W: https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details.\n", + "#9 5.492 Reading package lists...\n", + "#9 6.479 Building dependency tree...\n", + "#9 6.710 Reading state information...\n", + "#9 7.035 The following additional packages will be installed:\n", + "#9 7.035 libbrotli1 libcurl4t64 libgssapi-krb5-2 libjq1 libk5crypto3 libkeyutils1\n", + "#9 7.036 libkrb5-3 libkrb5support0 libnghttp2-14 libonig5 libpsl5t64 librtmp1\n", + "#9 7.037 libssh-4\n", + "#9 7.039 Suggested packages:\n", + "#9 7.039 krb5-doc krb5-user\n", + "#9 7.039 Recommended packages:\n", + "#9 7.039 krb5-locales publicsuffix\n", + "#9 7.095 The following NEW packages will be installed:\n", + "#9 7.096 curl jq libbrotli1 libcurl4t64 libgssapi-krb5-2 libjq1 libk5crypto3\n", + "#9 7.097 libkeyutils1 libkrb5-3 libkrb5support0 libnghttp2-14 libonig5 libpsl5t64\n", + "#9 7.098 librtmp1 libssh-4\n", + "#9 7.459 0 upgraded, 15 newly installed, 0 to remove and 34 not upgraded.\n", + "#9 7.459 Need to get 2270 kB of archives.\n", + "#9 7.459 After this operation, 6343 kB of additional disk space will be used.\n", + "#9 7.459 Get:1 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libkrb5support0 amd64 1.20.1-6ubuntu2.6 [34.4 kB]\n", + "#9 7.461 Get:2 http://security.ubuntu.com/ubuntu noble-security/main amd64 libssh-4 amd64 0.10.6-2ubuntu0.2 [188 kB]\n", + "#9 7.816 Get:3 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libk5crypto3 amd64 1.20.1-6ubuntu2.6 [82.0 kB]\n", + "#9 8.063 Get:4 http://archive.ubuntu.com/ubuntu noble/main amd64 libkeyutils1 amd64 1.6.3-3build1 [9490 B]\n", + "#9 8.089 Get:5 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libkrb5-3 amd64 1.20.1-6ubuntu2.6 [348 kB]\n", + "#9 8.380 Get:6 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libgssapi-krb5-2 amd64 1.20.1-6ubuntu2.6 [143 kB]\n", + "#9 8.429 Get:7 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libnghttp2-14 amd64 1.59.0-1ubuntu0.2 [74.3 kB]\n", + "#9 8.613 Get:8 http://archive.ubuntu.com/ubuntu noble/main amd64 libpsl5t64 amd64 0.21.2-1.1build1 [57.1 kB]\n", + "#9 8.615 Get:9 http://archive.ubuntu.com/ubuntu noble/main amd64 libbrotli1 amd64 1.1.0-2build2 [331 kB]\n", + "#9 8.654 Get:10 
http://archive.ubuntu.com/ubuntu noble/main amd64 librtmp1 amd64 2.4+20151223.gitfa8646d.1-2build7 [56.3 kB]\n", + "#9 8.656 Get:11 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libcurl4t64 amd64 8.5.0-2ubuntu10.6 [341 kB]\n", + "#9 8.709 Get:12 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 curl amd64 8.5.0-2ubuntu10.6 [226 kB]\n", + "#9 9.023 Get:13 http://archive.ubuntu.com/ubuntu noble/main amd64 libonig5 amd64 6.9.9-1build1 [172 kB]\n", + "#9 9.720 Get:14 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libjq1 amd64 1.7.1-3ubuntu0.24.04.1 [141 kB]\n", + "#9 9.813 Get:15 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 jq amd64 1.7.1-3ubuntu0.24.04.1 [65.7 kB]\n", + "#9 10.98 debconf: delaying package configuration, since apt-utils is not installed\n", + "#9 11.05 Fetched 2270 kB in 3s (825 kB/s)\n", + "#9 11.12 Selecting previously unselected package libkrb5support0:amd64.\n", + "(Reading database ... 5321 files and directories currently installed.)\n", + "#9 11.15 Preparing to unpack .../00-libkrb5support0_1.20.1-6ubuntu2.6_amd64.deb ...\n", + "#9 11.17 Unpacking libkrb5support0:amd64 (1.20.1-6ubuntu2.6) ...\n", + "#9 11.24 Selecting previously unselected package libk5crypto3:amd64.\n", + "#9 11.24 Preparing to unpack .../01-libk5crypto3_1.20.1-6ubuntu2.6_amd64.deb ...\n", + "#9 11.25 Unpacking libk5crypto3:amd64 (1.20.1-6ubuntu2.6) ...\n", + "#9 11.30 Selecting previously unselected package libkeyutils1:amd64.\n", + "#9 11.30 Preparing to unpack .../02-libkeyutils1_1.6.3-3build1_amd64.deb ...\n", + "#9 11.30 Unpacking libkeyutils1:amd64 (1.6.3-3build1) ...\n", + "#9 11.36 Selecting previously unselected package libkrb5-3:amd64.\n", + "#9 11.37 Preparing to unpack .../03-libkrb5-3_1.20.1-6ubuntu2.6_amd64.deb ...\n", + "#9 11.37 Unpacking libkrb5-3:amd64 (1.20.1-6ubuntu2.6) ...\n", + "#9 11.50 Selecting previously unselected package libgssapi-krb5-2:amd64.\n", + "#9 11.51 Preparing to unpack .../04-libgssapi-krb5-2_1.20.1-6ubuntu2.6_amd64.deb ...\n", + "#9 11.52 Unpacking libgssapi-krb5-2:amd64 (1.20.1-6ubuntu2.6) ...\n", + "#9 11.61 Selecting previously unselected package libnghttp2-14:amd64.\n", + "#9 11.61 Preparing to unpack .../05-libnghttp2-14_1.59.0-1ubuntu0.2_amd64.deb ...\n", + "#9 11.63 Unpacking libnghttp2-14:amd64 (1.59.0-1ubuntu0.2) ...\n", + "#9 11.73 Selecting previously unselected package libpsl5t64:amd64.\n", + "#9 11.73 Preparing to unpack .../06-libpsl5t64_0.21.2-1.1build1_amd64.deb ...\n", + "#9 11.75 Unpacking libpsl5t64:amd64 (0.21.2-1.1build1) ...\n", + "#9 11.85 Selecting previously unselected package libbrotli1:amd64.\n", + "#9 11.85 Preparing to unpack .../07-libbrotli1_1.1.0-2build2_amd64.deb ...\n", + "#9 11.86 Unpacking libbrotli1:amd64 (1.1.0-2build2) ...\n", + "#9 11.98 Selecting previously unselected package librtmp1:amd64.\n", + "#9 11.99 Preparing to unpack .../08-librtmp1_2.4+20151223.gitfa8646d.1-2build7_amd64.deb ...\n", + "#9 12.00 Unpacking librtmp1:amd64 (2.4+20151223.gitfa8646d.1-2build7) ...\n", + "#9 12.10 Selecting previously unselected package libssh-4:amd64.\n", + "#9 12.11 Preparing to unpack .../09-libssh-4_0.10.6-2ubuntu0.2_amd64.deb ...\n", + "#9 12.12 Unpacking libssh-4:amd64 (0.10.6-2ubuntu0.2) ...\n", + "#9 12.23 Selecting previously unselected package libcurl4t64:amd64.\n", + "#9 12.23 Preparing to unpack .../10-libcurl4t64_8.5.0-2ubuntu10.6_amd64.deb ...\n", + "#9 12.24 Unpacking libcurl4t64:amd64 (8.5.0-2ubuntu10.6) ...\n", + "#9 12.33 Selecting previously unselected package curl.\n", + 
"#9 12.33 Preparing to unpack .../11-curl_8.5.0-2ubuntu10.6_amd64.deb ...\n", + "#9 12.34 Unpacking curl (8.5.0-2ubuntu10.6) ...\n", + "#9 12.46 Selecting previously unselected package libonig5:amd64.\n", + "#9 12.47 Preparing to unpack .../12-libonig5_6.9.9-1build1_amd64.deb ...\n", + "#9 12.48 Unpacking libonig5:amd64 (6.9.9-1build1) ...\n", + "#9 12.59 Selecting previously unselected package libjq1:amd64.\n", + "#9 12.59 Preparing to unpack .../13-libjq1_1.7.1-3ubuntu0.24.04.1_amd64.deb ...\n", + "#9 12.60 Unpacking libjq1:amd64 (1.7.1-3ubuntu0.24.04.1) ...\n", + "#9 12.69 Selecting previously unselected package jq.\n", + "#9 12.69 Preparing to unpack .../14-jq_1.7.1-3ubuntu0.24.04.1_amd64.deb ...\n", + "#9 12.70 Unpacking jq (1.7.1-3ubuntu0.24.04.1) ...\n", + "#9 12.80 Setting up libkeyutils1:amd64 (1.6.3-3build1) ...\n", + "#9 12.84 Setting up libbrotli1:amd64 (1.1.0-2build2) ...\n", + "#9 12.88 Setting up libpsl5t64:amd64 (0.21.2-1.1build1) ...\n", + "#9 12.91 Setting up libnghttp2-14:amd64 (1.59.0-1ubuntu0.2) ...\n", + "#9 12.95 Setting up libkrb5support0:amd64 (1.20.1-6ubuntu2.6) ...\n", + "#9 12.99 Setting up librtmp1:amd64 (2.4+20151223.gitfa8646d.1-2build7) ...\n", + "#9 13.02 Setting up libk5crypto3:amd64 (1.20.1-6ubuntu2.6) ...\n", + "#9 13.06 Setting up libkrb5-3:amd64 (1.20.1-6ubuntu2.6) ...\n", + "#9 13.10 Setting up libonig5:amd64 (6.9.9-1build1) ...\n", + "#9 13.13 Setting up libjq1:amd64 (1.7.1-3ubuntu0.24.04.1) ...\n", + "#9 13.17 Setting up libgssapi-krb5-2:amd64 (1.20.1-6ubuntu2.6) ...\n", + "#9 13.21 Setting up libssh-4:amd64 (0.10.6-2ubuntu0.2) ...\n", + "#9 13.24 Setting up jq (1.7.1-3ubuntu0.24.04.1) ...\n", + "#9 13.28 Setting up libcurl4t64:amd64 (8.5.0-2ubuntu10.6) ...\n", + "#9 13.32 Setting up curl (8.5.0-2ubuntu10.6) ...\n", + "#9 13.35 Processing triggers for libc-bin (2.39-0ubuntu8.5) ...\n", + "#9 DONE 15.2s\n", + "\n", + "#10 [release 1/22] RUN mkdir -p /etc/holoscan/ && mkdir -p /opt/holoscan/ && mkdir -p /var/holoscan && mkdir -p /opt/holoscan/app && mkdir -p /var/holoscan/input && mkdir -p /var/holoscan/output\n", + "#10 DONE 0.2s\n", + "\n", + "#11 [release 2/22] RUN rm -f /etc/apt/sources.list.d/cuda*.list && curl -OL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64/cuda-keyring_1.1-1_all.deb && dpkg -i cuda-keyring_1.1-1_all.deb && rm -f cuda-keyring_1.1-1_all.deb && apt-get update\n", + "#11 0.208 % Total % Received % Xferd Average Speed Time Time Time Current\n", + "#11 0.208 Dload Upload Total Spent Left Speed\n", + "100 4328 100 4328 0 0 67934 0 --:--:-- --:--:-- --:--:-- 68698\n", + "#11 0.340 Selecting previously unselected package cuda-keyring.\n", + "#11 0.352 (Reading database ... 
5425 files and directories currently installed.)\n", + "#11 0.353 Preparing to unpack cuda-keyring_1.1-1_all.deb ...\n", + "#11 0.365 Unpacking cuda-keyring (1.1-1) ...\n", + "#11 0.421 Setting up cuda-keyring (1.1-1) ...\n", + "#11 0.689 Get:1 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64 InRelease [1581 B]\n", + "#11 0.761 Get:2 http://archive.ubuntu.com/ubuntu noble InRelease [256 kB]\n", + "#11 0.838 Get:3 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64 Packages [966 kB]\n", + "#11 0.918 Get:4 http://security.ubuntu.com/ubuntu noble-security InRelease [126 kB]\n", + "#11 1.166 Get:5 http://archive.ubuntu.com/ubuntu noble-updates InRelease [126 kB]\n", + "#11 1.267 Get:6 http://archive.ubuntu.com/ubuntu noble-backports InRelease [126 kB]\n", + "#11 1.376 Get:7 http://archive.ubuntu.com/ubuntu noble/main amd64 Packages [1808 kB]\n", + "#11 1.626 Get:8 http://archive.ubuntu.com/ubuntu noble/restricted amd64 Packages [117 kB]\n", + "#11 1.630 Get:9 http://archive.ubuntu.com/ubuntu noble/universe amd64 Packages [19.3 MB]\n", + "#11 1.846 Get:10 http://security.ubuntu.com/ubuntu noble-security/multiverse amd64 Packages [33.1 kB]\n", + "#11 2.010 Get:11 http://security.ubuntu.com/ubuntu noble-security/universe amd64 Packages [1170 kB]\n", + "#11 2.148 Get:12 http://archive.ubuntu.com/ubuntu noble/multiverse amd64 Packages [331 kB]\n", + "#11 2.155 Get:13 http://archive.ubuntu.com/ubuntu noble-updates/restricted amd64 Packages [2813 kB]\n", + "#11 2.219 Get:14 http://archive.ubuntu.com/ubuntu noble-updates/universe amd64 Packages [1939 kB]\n", + "#11 2.271 Get:15 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 Packages [1988 kB]\n", + "#11 2.318 Get:16 http://archive.ubuntu.com/ubuntu noble-updates/multiverse amd64 Packages [35.9 kB]\n", + "#11 2.319 Get:17 http://archive.ubuntu.com/ubuntu noble-backports/main amd64 Packages [49.4 kB]\n", + "#11 2.319 Get:18 http://archive.ubuntu.com/ubuntu noble-backports/universe amd64 Packages [33.9 kB]\n", + "#11 2.515 Get:19 http://security.ubuntu.com/ubuntu noble-security/main amd64 Packages [1599 kB]\n", + "#11 2.681 Get:20 http://security.ubuntu.com/ubuntu noble-security/restricted amd64 Packages [2638 kB]\n", + "#11 3.462 Fetched 35.5 MB in 3s (12.4 MB/s)\n", + "#11 3.462 Reading package lists...\n", + "#11 DONE 4.6s\n", + "\n", + "#12 [release 3/22] RUN apt update && apt-get install -y --no-install-recommends --no-install-suggests python3-minimal=3.12.3-* libpython3-stdlib=3.12.3-* python3=3.12.3-* python3-venv=3.12.3-* python3-pip=24.0+dfsg-* && rm -rf /var/lib/apt/lists/*\n", + "#12 0.183 \n", + "#12 0.183 WARNING: apt does not have a stable CLI interface. Use with caution in scripts.\n", + "#12 0.183 \n", + "#12 0.313 Hit:1 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64 InRelease\n", + "#12 0.410 Hit:2 http://security.ubuntu.com/ubuntu noble-security InRelease\n", + "#12 0.526 Hit:3 http://archive.ubuntu.com/ubuntu noble InRelease\n", + "#12 0.661 Hit:4 http://archive.ubuntu.com/ubuntu noble-updates InRelease\n", + "#12 0.805 Hit:5 http://archive.ubuntu.com/ubuntu noble-backports InRelease\n", + "#12 1.130 Reading package lists...\n", + "#12 2.067 Building dependency tree...\n", + "#12 2.283 Reading state information...\n", + "#12 2.314 34 packages can be upgraded. 
Run 'apt list --upgradable' to see them.\n", + "#12 2.332 Reading package lists...\n", + "#12 3.286 Building dependency tree...\n", + "#12 3.509 Reading state information...\n", + "#12 3.812 The following additional packages will be installed:\n", + "#12 3.814 libexpat1 libpython3.12-minimal libpython3.12-stdlib media-types netbase\n", + "#12 3.814 python3-pip-whl python3-pkg-resources python3-setuptools\n", + "#12 3.814 python3-setuptools-whl python3-wheel python3.12 python3.12-minimal\n", + "#12 3.814 python3.12-venv tzdata\n", + "#12 3.816 Suggested packages:\n", + "#12 3.816 python3-doc python3-tk python-setuptools-doc python3.12-doc binutils\n", + "#12 3.816 binfmt-support\n", + "#12 3.816 Recommended packages:\n", + "#12 3.816 build-essential python3-dev\n", + "#12 3.915 The following NEW packages will be installed:\n", + "#12 3.916 libexpat1 libpython3-stdlib libpython3.12-minimal libpython3.12-stdlib\n", + "#12 3.919 media-types netbase python3 python3-minimal python3-pip python3-pip-whl\n", + "#12 3.919 python3-pkg-resources python3-setuptools python3-setuptools-whl python3-venv\n", + "#12 3.919 python3-wheel python3.12 python3.12-minimal python3.12-venv tzdata\n", + "#12 4.234 0 upgraded, 19 newly installed, 0 to remove and 34 not upgraded.\n", + "#12 4.234 Need to get 10.7 MB of archives.\n", + "#12 4.234 After this operation, 38.7 MB of additional disk space will be used.\n", + "#12 4.234 Get:1 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libpython3.12-minimal amd64 3.12.3-1ubuntu0.8 [836 kB]\n", + "#12 5.240 Get:2 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libexpat1 amd64 2.6.1-2ubuntu0.3 [88.0 kB]\n", + "#12 5.250 Get:3 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 python3.12-minimal amd64 3.12.3-1ubuntu0.8 [2334 kB]\n", + "#12 5.904 Get:4 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 python3-minimal amd64 3.12.3-0ubuntu2 [27.4 kB]\n", + "#12 5.907 Get:5 http://archive.ubuntu.com/ubuntu noble/main amd64 media-types all 10.1.0 [27.5 kB]\n", + "#12 5.911 Get:6 http://archive.ubuntu.com/ubuntu noble/main amd64 netbase all 6.4 [13.1 kB]\n", + "#12 5.913 Get:7 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 tzdata all 2025b-0ubuntu0.24.04.1 [276 kB]\n", + "#12 5.933 Get:8 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libpython3.12-stdlib amd64 3.12.3-1ubuntu0.8 [2068 kB]\n", + "#12 6.270 Get:9 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 python3.12 amd64 3.12.3-1ubuntu0.8 [651 kB]\n", + "#12 6.394 Get:10 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 libpython3-stdlib amd64 3.12.3-0ubuntu2 [10.0 kB]\n", + "#12 6.395 Get:11 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 python3 amd64 3.12.3-0ubuntu2 [23.0 kB]\n", + "#12 6.679 Get:12 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 python3-pkg-resources all 68.1.2-2ubuntu1.2 [168 kB]\n", + "#12 7.383 Get:13 http://archive.ubuntu.com/ubuntu noble-updates/main amd64 python3-setuptools all 68.1.2-2ubuntu1.2 [397 kB]\n", + "#12 7.666 Get:14 http://archive.ubuntu.com/ubuntu noble/universe amd64 python3-wheel all 0.42.0-2 [53.1 kB]\n", + "#12 7.683 Get:15 http://archive.ubuntu.com/ubuntu noble-updates/universe amd64 python3-pip all 24.0+dfsg-1ubuntu1.3 [1320 kB]\n", + "#12 7.939 Get:16 http://archive.ubuntu.com/ubuntu noble-updates/universe amd64 python3-pip-whl all 24.0+dfsg-1ubuntu1.3 [1707 kB]\n", + "#12 8.069 Get:17 http://archive.ubuntu.com/ubuntu noble-updates/universe amd64 python3-setuptools-whl all 
68.1.2-2ubuntu1.2 [716 kB]\n", + "#12 8.104 Get:18 http://archive.ubuntu.com/ubuntu noble-updates/universe amd64 python3.12-venv amd64 3.12.3-1ubuntu0.8 [5678 B]\n", + "#12 8.104 Get:19 http://archive.ubuntu.com/ubuntu noble-updates/universe amd64 python3-venv amd64 3.12.3-0ubuntu2 [1034 B]\n", + "#12 8.272 debconf: delaying package configuration, since apt-utils is not installed\n", + "#12 8.345 Fetched 10.7 MB in 4s (2575 kB/s)\n", + "#12 8.422 Selecting previously unselected package libpython3.12-minimal:amd64.\n", + "(Reading database ... 5430 files and directories currently installed.)\n", + "#12 8.439 Preparing to unpack .../libpython3.12-minimal_3.12.3-1ubuntu0.8_amd64.deb ...\n", + "#12 8.451 Unpacking libpython3.12-minimal:amd64 (3.12.3-1ubuntu0.8) ...\n", + "#12 8.619 Selecting previously unselected package libexpat1:amd64.\n", + "#12 8.622 Preparing to unpack .../libexpat1_2.6.1-2ubuntu0.3_amd64.deb ...\n", + "#12 8.646 Unpacking libexpat1:amd64 (2.6.1-2ubuntu0.3) ...\n", + "#12 8.758 Selecting previously unselected package python3.12-minimal.\n", + "#12 8.760 Preparing to unpack .../python3.12-minimal_3.12.3-1ubuntu0.8_amd64.deb ...\n", + "#12 8.780 Unpacking python3.12-minimal (3.12.3-1ubuntu0.8) ...\n", + "#12 8.981 Setting up libpython3.12-minimal:amd64 (3.12.3-1ubuntu0.8) ...\n", + "#12 9.033 Setting up libexpat1:amd64 (2.6.1-2ubuntu0.3) ...\n", + "#12 9.068 Setting up python3.12-minimal (3.12.3-1ubuntu0.8) ...\n", + "#12 9.915 Selecting previously unselected package python3-minimal.\n", + "(Reading database ... 5749 files and directories currently installed.)\n", + "#12 9.930 Preparing to unpack .../0-python3-minimal_3.12.3-0ubuntu2_amd64.deb ...\n", + "#12 9.943 Unpacking python3-minimal (3.12.3-0ubuntu2) ...\n", + "#12 10.04 Selecting previously unselected package media-types.\n", + "#12 10.04 Preparing to unpack .../1-media-types_10.1.0_all.deb ...\n", + "#12 10.05 Unpacking media-types (10.1.0) ...\n", + "#12 10.15 Selecting previously unselected package netbase.\n", + "#12 10.16 Preparing to unpack .../2-netbase_6.4_all.deb ...\n", + "#12 10.17 Unpacking netbase (6.4) ...\n", + "#12 10.28 Selecting previously unselected package tzdata.\n", + "#12 10.28 Preparing to unpack .../3-tzdata_2025b-0ubuntu0.24.04.1_all.deb ...\n", + "#12 10.30 Unpacking tzdata (2025b-0ubuntu0.24.04.1) ...\n", + "#12 10.45 Selecting previously unselected package libpython3.12-stdlib:amd64.\n", + "#12 10.46 Preparing to unpack .../4-libpython3.12-stdlib_3.12.3-1ubuntu0.8_amd64.deb ...\n", + "#12 10.47 Unpacking libpython3.12-stdlib:amd64 (3.12.3-1ubuntu0.8) ...\n", + "#12 10.69 Selecting previously unselected package python3.12.\n", + "#12 10.69 Preparing to unpack .../5-python3.12_3.12.3-1ubuntu0.8_amd64.deb ...\n", + "#12 10.71 Unpacking python3.12 (3.12.3-1ubuntu0.8) ...\n", + "#12 10.80 Selecting previously unselected package libpython3-stdlib:amd64.\n", + "#12 10.80 Preparing to unpack .../6-libpython3-stdlib_3.12.3-0ubuntu2_amd64.deb ...\n", + "#12 10.81 Unpacking libpython3-stdlib:amd64 (3.12.3-0ubuntu2) ...\n", + "#12 10.91 Setting up python3-minimal (3.12.3-0ubuntu2) ...\n", + "#12 11.20 Selecting previously unselected package python3.\n", + "(Reading database ... 
6719 files and directories currently installed.)\n", + "#12 11.22 Preparing to unpack .../0-python3_3.12.3-0ubuntu2_amd64.deb ...\n", + "#12 11.24 Unpacking python3 (3.12.3-0ubuntu2) ...\n", + "#12 11.34 Selecting previously unselected package python3-pkg-resources.\n", + "#12 11.34 Preparing to unpack .../1-python3-pkg-resources_68.1.2-2ubuntu1.2_all.deb ...\n", + "#12 11.36 Unpacking python3-pkg-resources (68.1.2-2ubuntu1.2) ...\n", + "#12 11.47 Selecting previously unselected package python3-setuptools.\n", + "#12 11.48 Preparing to unpack .../2-python3-setuptools_68.1.2-2ubuntu1.2_all.deb ...\n", + "#12 11.49 Unpacking python3-setuptools (68.1.2-2ubuntu1.2) ...\n", + "#12 11.63 Selecting previously unselected package python3-wheel.\n", + "#12 11.63 Preparing to unpack .../3-python3-wheel_0.42.0-2_all.deb ...\n", + "#12 11.64 Unpacking python3-wheel (0.42.0-2) ...\n", + "#12 11.75 Selecting previously unselected package python3-pip.\n", + "#12 11.75 Preparing to unpack .../4-python3-pip_24.0+dfsg-1ubuntu1.3_all.deb ...\n", + "#12 11.76 Unpacking python3-pip (24.0+dfsg-1ubuntu1.3) ...\n", + "#12 11.96 Selecting previously unselected package python3-pip-whl.\n", + "#12 11.97 Preparing to unpack .../5-python3-pip-whl_24.0+dfsg-1ubuntu1.3_all.deb ...\n", + "#12 11.98 Unpacking python3-pip-whl (24.0+dfsg-1ubuntu1.3) ...\n", + "#12 12.07 Selecting previously unselected package python3-setuptools-whl.\n", + "#12 12.08 Preparing to unpack .../6-python3-setuptools-whl_68.1.2-2ubuntu1.2_all.deb ...\n", + "#12 12.09 Unpacking python3-setuptools-whl (68.1.2-2ubuntu1.2) ...\n", + "#12 12.20 Selecting previously unselected package python3.12-venv.\n", + "#12 12.20 Preparing to unpack .../7-python3.12-venv_3.12.3-1ubuntu0.8_amd64.deb ...\n", + "#12 12.21 Unpacking python3.12-venv (3.12.3-1ubuntu0.8) ...\n", + "#12 12.29 Selecting previously unselected package python3-venv.\n", + "#12 12.30 Preparing to unpack .../8-python3-venv_3.12.3-0ubuntu2_amd64.deb ...\n", + "#12 12.31 Unpacking python3-venv (3.12.3-0ubuntu2) ...\n", + "#12 12.40 Setting up media-types (10.1.0) ...\n", + "#12 12.45 Setting up python3-setuptools-whl (68.1.2-2ubuntu1.2) ...\n", + "#12 12.48 Setting up python3-pip-whl (24.0+dfsg-1ubuntu1.3) ...\n", + "#12 12.52 Setting up tzdata (2025b-0ubuntu0.24.04.1) ...\n", + "#12 12.65 \n", + "#12 12.65 Current default time zone: 'Etc/UTC'\n", + "#12 12.65 Local time is now: Thu Oct 30 19:11:50 UTC 2025.\n", + "#12 12.65 Universal Time is now: Thu Oct 30 19:11:50 UTC 2025.\n", + "#12 12.65 Run 'dpkg-reconfigure tzdata' if you wish to change it.\n", + "#12 12.65 \n", + "#12 12.70 Setting up netbase (6.4) ...\n", + "#12 12.78 Setting up libpython3.12-stdlib:amd64 (3.12.3-1ubuntu0.8) ...\n", + "#12 12.82 Setting up python3.12 (3.12.3-1ubuntu0.8) ...\n", + "#12 13.69 Setting up libpython3-stdlib:amd64 (3.12.3-0ubuntu2) ...\n", + "#12 13.72 Setting up python3.12-venv (3.12.3-1ubuntu0.8) ...\n", + "#12 13.83 Setting up python3 (3.12.3-0ubuntu2) ...\n", + "#12 14.00 Setting up python3-wheel (0.42.0-2) ...\n", + "#12 14.21 Setting up python3-venv (3.12.3-0ubuntu2) ...\n", + "#12 14.25 Setting up python3-pkg-resources (68.1.2-2ubuntu1.2) ...\n", + "#12 14.53 Setting up python3-setuptools (68.1.2-2ubuntu1.2) ...\n", + "#12 15.06 Setting up python3-pip (24.0+dfsg-1ubuntu1.3) ...\n", + "#12 16.27 Processing triggers for libc-bin (2.39-0ubuntu8.5) ...\n", + "#12 DONE 16.8s\n", + "\n", + "#13 [release 4/22] RUN if id \"ubuntu\" >/dev/null 2>&1; then touch /var/mail/ubuntu && chown ubuntu /var/mail/ubuntu && 
userdel -r ubuntu; fi\n", + "#13 DONE 0.4s\n", + "\n", + "#14 [release 5/22] RUN groupadd -f -g 1000 holoscan\n", + "#14 DONE 0.2s\n", + "\n", + "#15 [release 6/22] RUN useradd -rm -d /home/holoscan -s /bin/bash -g 1000 -G sudo -u 1000 holoscan\n", + "#15 0.233 useradd warning: holoscan's uid 1000 is greater than SYS_UID_MAX 999\n", + "#15 DONE 0.3s\n", + "\n", + "#16 [release 7/22] RUN chown -R holoscan /var/holoscan && chown -R holoscan /var/holoscan/input && chown -R holoscan /var/holoscan/output\n", + "#16 DONE 0.2s\n", + "\n", + "#17 [release 8/22] WORKDIR /var/holoscan\n", + "#17 DONE 0.1s\n", + "\n", + "#18 [release 9/22] COPY ./tools /var/holoscan/tools\n", + "#18 DONE 0.1s\n", + "\n", + "#19 [release 10/22] RUN chmod +x /var/holoscan/tools\n", + "#19 DONE 0.2s\n", + "\n", + "#20 [release 11/22] RUN rm -rf /usr/lib/python3.12/EXTERNALLY-MANAGED\n", + "#20 DONE 0.2s\n", + "\n", + "#21 [release 12/22] WORKDIR /var/holoscan\n", + "#21 DONE 0.1s\n", + "\n", + "#22 [release 13/22] COPY ./pip/requirements.txt /tmp/requirements.txt\n", + "#22 DONE 0.1s\n", + "\n", + "#23 [release 14/22] RUN pip install --upgrade pip\n", + "#23 0.451 Defaulting to user installation because normal site-packages is not writeable\n", + "#23 0.487 Requirement already satisfied: pip in /usr/lib/python3/dist-packages (24.0)\n", + "#23 0.666 Collecting pip\n", + "#23 0.749 Downloading pip-25.3-py3-none-any.whl.metadata (4.7 kB)\n", + "#23 0.789 Downloading pip-25.3-py3-none-any.whl (1.8 MB)\n", + "#23 0.921 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.8/1.8 MB 17.8 MB/s eta 0:00:00\n", + "#23 0.943 Installing collected packages: pip\n", + "#23 1.727 Successfully installed pip-25.3\n", + "#23 DONE 1.9s\n", + "\n", + "#24 [release 15/22] RUN pip install --no-cache-dir --user -r /tmp/requirements.txt\n", + "#24 0.612 Collecting highdicom>=0.18.2 (from -r /tmp/requirements.txt (line 1))\n", + "#24 0.675 Downloading highdicom-0.27.0-py3-none-any.whl.metadata (5.8 kB)\n", + "#24 0.766 Collecting monai>=1.0 (from -r /tmp/requirements.txt (line 2))\n", + "#24 0.773 Downloading monai-1.5.1-py3-none-any.whl.metadata (13 kB)\n", + "#24 0.820 Collecting nibabel>=3.2.1 (from -r /tmp/requirements.txt (line 3))\n", + "#24 0.825 Downloading nibabel-5.3.2-py3-none-any.whl.metadata (9.1 kB)\n", + "#24 1.050 Collecting numpy>=1.21.6 (from -r /tmp/requirements.txt (line 4))\n", + "#24 1.053 Downloading numpy-2.3.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl.metadata (62 kB)\n", + "#24 1.103 Collecting pydicom>=2.3.0 (from -r /tmp/requirements.txt (line 5))\n", + "#24 1.113 Downloading pydicom-3.0.1-py3-none-any.whl.metadata (9.4 kB)\n", + "#24 1.130 Requirement already satisfied: setuptools>=59.5.0 in /usr/lib/python3/dist-packages (from -r /tmp/requirements.txt (line 6)) (68.1.2)\n", + "#24 1.161 Collecting SimpleITK>=2.0.0 (from -r /tmp/requirements.txt (line 7))\n", + "#24 1.166 Downloading simpleitk-2.5.2-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (7.2 kB)\n", + "#24 1.210 Collecting torch>=1.12.0 (from -r /tmp/requirements.txt (line 8))\n", + "#24 1.214 Downloading torch-2.9.0-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (30 kB)\n", + "#24 1.368 Collecting pillow>=8.3 (from highdicom>=0.18.2->-r /tmp/requirements.txt (line 1))\n", + "#24 1.372 Downloading pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl.metadata (8.8 kB)\n", + "#24 1.476 Collecting pyjpegls>=1.0.0 (from highdicom>=0.18.2->-r /tmp/requirements.txt (line 1))\n", + "#24 1.555 Downloading 
pyjpegls-1.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.5 kB)\n", + "#24 1.585 Collecting typing-extensions>=4.0.0 (from highdicom>=0.18.2->-r /tmp/requirements.txt (line 1))\n", + "#24 1.588 Downloading typing_extensions-4.15.0-py3-none-any.whl.metadata (3.3 kB)\n", + "#24 1.623 Collecting packaging>=20 (from nibabel>=3.2.1->-r /tmp/requirements.txt (line 3))\n", + "#24 1.628 Downloading packaging-25.0-py3-none-any.whl.metadata (3.3 kB)\n", + "#24 1.663 Collecting filelock (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.667 Downloading filelock-3.20.0-py3-none-any.whl.metadata (2.1 kB)\n", + "#24 1.683 Collecting sympy>=1.13.3 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.687 Downloading sympy-1.14.0-py3-none-any.whl.metadata (12 kB)\n", + "#24 1.709 Collecting networkx>=2.5.1 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.712 Downloading networkx-3.5-py3-none-any.whl.metadata (6.3 kB)\n", + "#24 1.727 Collecting jinja2 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.730 Downloading jinja2-3.1.6-py3-none-any.whl.metadata (2.9 kB)\n", + "#24 1.751 Collecting fsspec>=0.8.5 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.754 Downloading fsspec-2025.10.0-py3-none-any.whl.metadata (10 kB)\n", + "#24 1.785 Collecting nvidia-cuda-nvrtc-cu12==12.8.93 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.788 Downloading nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl.metadata (1.7 kB)\n", + "#24 1.800 Collecting nvidia-cuda-runtime-cu12==12.8.90 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.803 Downloading nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.7 kB)\n", + "#24 1.812 Collecting nvidia-cuda-cupti-cu12==12.8.90 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.816 Downloading nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.7 kB)\n", + "#24 1.827 Collecting nvidia-cudnn-cu12==9.10.2.21 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.830 Downloading nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl.metadata (1.8 kB)\n", + "#24 1.839 Collecting nvidia-cublas-cu12==12.8.4.1 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.842 Downloading nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl.metadata (1.7 kB)\n", + "#24 1.860 Collecting nvidia-cufft-cu12==11.3.3.83 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.867 Downloading nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.7 kB)\n", + "#24 1.882 Collecting nvidia-curand-cu12==10.3.9.90 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.885 Downloading nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl.metadata (1.7 kB)\n", + "#24 1.895 Collecting nvidia-cusolver-cu12==11.7.3.90 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.899 Downloading nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl.metadata (1.8 kB)\n", + "#24 1.907 Collecting nvidia-cusparse-cu12==12.5.8.93 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.910 Downloading nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.8 kB)\n", + "#24 1.918 Collecting nvidia-cusparselt-cu12==0.7.1 (from 
torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.921 Downloading nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl.metadata (7.0 kB)\n", + "#24 1.931 Collecting nvidia-nccl-cu12==2.27.5 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.934 Downloading nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (2.0 kB)\n", + "#24 1.942 Collecting nvidia-nvshmem-cu12==3.3.20 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.945 Downloading nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (2.1 kB)\n", + "#24 1.964 Collecting nvidia-nvtx-cu12==12.8.90 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.976 Downloading nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.8 kB)\n", + "#24 1.993 Collecting nvidia-nvjitlink-cu12==12.8.93 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 1.997 Downloading nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl.metadata (1.7 kB)\n", + "#24 2.006 Collecting nvidia-cufile-cu12==1.13.1.3 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 2.010 Downloading nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.7 kB)\n", + "#24 2.027 Collecting triton==3.5.0 (from torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 2.033 Downloading triton-3.5.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl.metadata (1.7 kB)\n", + "#24 2.079 Collecting mpmath<1.4,>=1.1.0 (from sympy>=1.13.3->torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 2.082 Downloading mpmath-1.3.0-py3-none-any.whl.metadata (8.6 kB)\n", + "#24 2.128 Collecting MarkupSafe>=2.0 (from jinja2->torch>=1.12.0->-r /tmp/requirements.txt (line 8))\n", + "#24 2.132 Downloading markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (2.7 kB)\n", + "#24 2.141 Downloading highdicom-0.27.0-py3-none-any.whl (1.1 MB)\n", + "#24 2.162 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.1/1.1 MB 68.1 MB/s 0:00:00\n", + "#24 2.167 Downloading monai-1.5.1-py3-none-any.whl (2.7 MB)\n", + "#24 2.200 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.7/2.7 MB 93.1 MB/s 0:00:00\n", + "#24 2.205 Downloading numpy-2.3.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (16.6 MB)\n", + "#24 2.351 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 16.6/16.6 MB 117.7 MB/s 0:00:00\n", + "#24 2.358 Downloading nibabel-5.3.2-py3-none-any.whl (3.3 MB)\n", + "#24 2.390 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.3/3.3 MB 118.2 MB/s 0:00:00\n", + "#24 2.396 Downloading pydicom-3.0.1-py3-none-any.whl (2.4 MB)\n", + "#24 2.420 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.4/2.4 MB 118.3 MB/s 0:00:00\n", + "#24 2.428 Downloading simpleitk-2.5.2-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (52.6 MB)\n", + "#24 3.192 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 52.6/52.6 MB 69.8 MB/s 0:00:00\n", + "#24 3.199 Downloading torch-2.9.0-cp312-cp312-manylinux_2_28_x86_64.whl (899.7 MB)\n", + "#24 15.06 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 899.7/899.7 MB 87.9 MB/s 0:00:11\n", + "#24 15.06 Downloading nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl (594.3 MB)\n", + "#24 20.38 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 594.3/594.3 MB 112.5 MB/s 0:00:05\n", + "#24 20.38 Downloading 
nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (10.2 MB)\n", + "#24 20.51 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 10.2/10.2 MB 84.3 MB/s 0:00:00\n", + "#24 20.51 Downloading nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl (88.0 MB)\n", + "#24 21.30 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 88.0/88.0 MB 112.7 MB/s 0:00:00\n", + "#24 21.31 Downloading nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (954 kB)\n", + "#24 21.32 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 954.8/954.8 kB 123.3 MB/s 0:00:00\n", + "#24 21.33 Downloading nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl (706.8 MB)\n", + "#24 29.89 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 706.8/706.8 MB 83.3 MB/s 0:00:08\n", + "#24 29.90 Downloading nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (193.1 MB)\n", + "#24 31.97 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 193.1/193.1 MB 93.4 MB/s 0:00:02\n", + "#24 31.97 Downloading nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (1.2 MB)\n", + "#24 31.99 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 122.2 MB/s 0:00:00\n", + "#24 32.00 Downloading nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl (63.6 MB)\n", + "#24 32.63 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 63.6/63.6 MB 100.8 MB/s 0:00:00\n", + "#24 32.64 Downloading nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl (267.5 MB)\n", + "#24 35.30 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 267.5/267.5 MB 100.5 MB/s 0:00:02\n", + "#24 35.30 Downloading nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (288.2 MB)\n", + "#24 37.78 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 288.2/288.2 MB 116.2 MB/s 0:00:02\n", + "#24 37.79 Downloading nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl (287.2 MB)\n", + "#24 40.73 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 287.2/287.2 MB 96.5 MB/s 0:00:02\n", + "#24 40.73 Downloading nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (322.3 MB)\n", + "#24 44.37 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 322.3/322.3 MB 88.1 MB/s 0:00:03\n", + "#24 44.37 Downloading nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl (39.3 MB)\n", + "#24 44.72 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 39.3/39.3 MB 113.1 MB/s 0:00:00\n", + "#24 44.73 Downloading nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (124.7 MB)\n", + "#24 45.80 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 124.7/124.7 MB 116.5 MB/s 0:00:01\n", + "#24 45.81 Downloading nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (89 kB)\n", + "#24 45.82 Downloading triton-3.5.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (170.5 MB)\n", + "#24 48.20 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 170.5/170.5 MB 71.5 MB/s 0:00:02\n", + "#24 48.21 Downloading fsspec-2025.10.0-py3-none-any.whl (200 kB)\n", + "#24 48.21 Downloading networkx-3.5-py3-none-any.whl (2.0 MB)\n", + "#24 48.23 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.0/2.0 MB 113.0 MB/s 0:00:00\n", + "#24 48.24 Downloading packaging-25.0-py3-none-any.whl (66 kB)\n", + "#24 48.25 Downloading pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (7.0 MB)\n", + "#24 48.31 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 7.0/7.0 MB 118.4 MB/s 0:00:00\n", + "#24 48.39 Downloading 
pyjpegls-1.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.7 MB)\n", + "#24 48.57 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.7/2.7 MB 14.3 MB/s 0:00:00\n", + "#24 48.58 Downloading sympy-1.14.0-py3-none-any.whl (6.3 MB)\n", + "#24 48.63 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.3/6.3 MB 116.8 MB/s 0:00:00\n", + "#24 48.64 Downloading mpmath-1.3.0-py3-none-any.whl (536 kB)\n", + "#24 48.64 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 536.2/536.2 kB 104.3 MB/s 0:00:00\n", + "#24 48.65 Downloading typing_extensions-4.15.0-py3-none-any.whl (44 kB)\n", + "#24 48.65 Downloading filelock-3.20.0-py3-none-any.whl (16 kB)\n", + "#24 48.66 Downloading jinja2-3.1.6-py3-none-any.whl (134 kB)\n", + "#24 48.66 Downloading markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (22 kB)\n", + "#24 59.64 Installing collected packages: SimpleITK, nvidia-cusparselt-cu12, mpmath, typing-extensions, triton, sympy, pydicom, pillow, packaging, nvidia-nvtx-cu12, nvidia-nvshmem-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufile-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, numpy, networkx, MarkupSafe, fsspec, filelock, pyjpegls, nvidia-cusparse-cu12, nvidia-cufft-cu12, nvidia-cudnn-cu12, nibabel, jinja2, nvidia-cusolver-cu12, highdicom, torch, monai\n", + "#24 138.2 \n", + "#24 138.3 Successfully installed MarkupSafe-3.0.3 SimpleITK-2.5.2 filelock-3.20.0 fsspec-2025.10.0 highdicom-0.27.0 jinja2-3.1.6 monai-1.5.1 mpmath-1.3.0 networkx-3.5 nibabel-5.3.2 numpy-2.3.4 nvidia-cublas-cu12-12.8.4.1 nvidia-cuda-cupti-cu12-12.8.90 nvidia-cuda-nvrtc-cu12-12.8.93 nvidia-cuda-runtime-cu12-12.8.90 nvidia-cudnn-cu12-9.10.2.21 nvidia-cufft-cu12-11.3.3.83 nvidia-cufile-cu12-1.13.1.3 nvidia-curand-cu12-10.3.9.90 nvidia-cusolver-cu12-11.7.3.90 nvidia-cusparse-cu12-12.5.8.93 nvidia-cusparselt-cu12-0.7.1 nvidia-nccl-cu12-2.27.5 nvidia-nvjitlink-cu12-12.8.93 nvidia-nvshmem-cu12-3.3.20 nvidia-nvtx-cu12-12.8.90 packaging-25.0 pillow-12.0.0 pydicom-3.0.1 pyjpegls-1.5.1 sympy-1.14.0 torch-2.9.0 triton-3.5.0 typing-extensions-4.15.0\n", + "#24 DONE 140.5s\n", + "\n", + "#25 [release 16/22] COPY ./monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl /tmp/monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl\n", + "#25 DONE 0.3s\n", + "\n", + "#26 [release 17/22] RUN pip install /tmp/monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl\n", + "#26 0.567 Defaulting to user installation because normal site-packages is not writeable\n", + "#26 0.637 Processing /tmp/monai_deploy_app_sdk-1.0.0+48.gfd5999e.dirty-py3-none-any.whl\n", + "#26 0.646 Requirement already satisfied: numpy>=1.21.6 in /home/holoscan/.local/lib/python3.12/site-packages (from monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty) (2.3.4)\n", + "#26 0.795 Collecting holoscan-cu12 (from monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 0.886 Downloading holoscan_cu12-3.7.0-cp312-cp312-manylinux_2_35_x86_64.whl.metadata (7.1 kB)\n", + "#26 1.007 Collecting holoscan-cli (from monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 1.019 Downloading holoscan_cli-3.7.0-py3-none-any.whl.metadata (4.0 kB)\n", + "#26 1.074 Collecting colorama>=0.4.1 (from monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 1.080 Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n", + "#26 1.156 Collecting tritonclient>=2.53.0 (from tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 
1.162 Downloading tritonclient-2.61.0-py3-none-manylinux1_x86_64.whl.metadata (2.9 kB)\n", + "#26 1.222 Collecting typeguard>=3.0.0 (from monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 1.225 Downloading typeguard-4.4.4-py3-none-any.whl.metadata (3.3 kB)\n", + "#26 1.272 Collecting perf-analyzer (from tritonclient>=2.53.0->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 1.280 Downloading perf_analyzer-2.59.1-py3-none-manylinux_2_38_x86_64.whl.metadata (6.3 kB)\n", + "#26 1.425 Collecting python-rapidjson>=0.9.1 (from tritonclient>=2.53.0->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 1.430 Downloading python_rapidjson-1.22-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl.metadata (24 kB)\n", + "#26 1.522 Collecting urllib3>=2.0.7 (from tritonclient>=2.53.0->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 1.526 Downloading urllib3-2.5.0-py3-none-any.whl.metadata (6.5 kB)\n", + "#26 2.024 Collecting aiohttp<4.0.0,>=3.8.1 (from tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 2.028 Downloading aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (8.1 kB)\n", + "#26 2.112 Collecting cuda-python (from tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 2.116 Downloading cuda_python-13.0.3-py3-none-any.whl.metadata (4.7 kB)\n", + "#26 2.291 Collecting geventhttpclient>=2.3.3 (from tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 2.296 Downloading geventhttpclient-2.3.5-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (8.4 kB)\n", + "#26 2.764 Collecting grpcio<1.68,>=1.63.0 (from tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 2.768 Downloading grpcio-1.67.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.9 kB)\n", + "#26 2.785 Requirement already satisfied: packaging>=14.1 in /home/holoscan/.local/lib/python3.12/site-packages (from tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty) (25.0)\n", + "#26 3.026 Collecting protobuf<6.0dev,>=5.26.1 (from tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 3.029 Downloading protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl.metadata (592 bytes)\n", + "#26 3.085 Collecting aiohappyeyeballs>=2.5.0 (from aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 3.090 Downloading aiohappyeyeballs-2.6.1-py3-none-any.whl.metadata (5.9 kB)\n", + "#26 3.138 Collecting aiosignal>=1.4.0 (from aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 3.145 Downloading aiosignal-1.4.0-py3-none-any.whl.metadata (3.7 kB)\n", + "#26 3.205 Collecting attrs>=17.3.0 (from aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 3.209 Downloading attrs-25.4.0-py3-none-any.whl.metadata (10 kB)\n", + "#26 3.322 Collecting frozenlist>=1.1.1 (from aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 3.329 Downloading frozenlist-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (20 kB)\n", + "#26 3.584 Collecting multidict<7.0,>=4.5 (from 
aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 3.588 Downloading multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (5.3 kB)\n", + "#26 3.684 Collecting propcache>=0.2.0 (from aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 3.688 Downloading propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (13 kB)\n", + "#26 3.943 Collecting yarl<2.0,>=1.17.0 (from aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 3.947 Downloading yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (75 kB)\n", + "#26 4.024 Collecting idna>=2.0 (from yarl<2.0,>=1.17.0->aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.028 Downloading idna-3.11-py3-none-any.whl.metadata (8.4 kB)\n", + "#26 4.065 Requirement already satisfied: typing-extensions>=4.2 in /home/holoscan/.local/lib/python3.12/site-packages (from aiosignal>=1.4.0->aiohttp<4.0.0,>=3.8.1->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty) (4.15.0)\n", + "#26 4.241 Collecting gevent (from geventhttpclient>=2.3.3->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.249 Downloading gevent-25.9.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (14 kB)\n", + "#26 4.313 Collecting certifi (from geventhttpclient>=2.3.3->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.317 Downloading certifi-2025.10.5-py3-none-any.whl.metadata (2.5 kB)\n", + "#26 4.387 Collecting brotli (from geventhttpclient>=2.3.3->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.393 Downloading Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (5.5 kB)\n", + "#26 4.467 Collecting cuda-bindings~=13.0.3 (from cuda-python->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.471 Downloading cuda_bindings-13.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl.metadata (2.3 kB)\n", + "#26 4.513 Collecting cuda-pathfinder~=1.1 (from cuda-python->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.519 Downloading cuda_pathfinder-1.3.2-py3-none-any.whl.metadata (1.9 kB)\n", + "#26 4.682 Collecting greenlet>=3.2.2 (from gevent->geventhttpclient>=2.3.3->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.685 Downloading greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl.metadata (4.1 kB)\n", + "#26 4.737 Collecting zope.event (from gevent->geventhttpclient>=2.3.3->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.741 Downloading zope_event-6.0-py3-none-any.whl.metadata (5.1 kB)\n", + "#26 4.889 Collecting zope.interface (from gevent->geventhttpclient>=2.3.3->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 4.895 Downloading zope_interface-8.0.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl.metadata (44 kB)\n", + "#26 4.928 Requirement already satisfied: Jinja2<4.0.0,>=3.1.6 in /home/holoscan/.local/lib/python3.12/site-packages (from 
holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty) (3.1.6)\n", + "#26 4.929 Requirement already satisfied: pip>25.1.0 in /home/holoscan/.local/lib/python3.12/site-packages (from holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty) (25.3)\n", + "#26 5.014 Collecting psutil<8.0,>=7.0.0 (from holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 5.018 Downloading psutil-7.1.2-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl.metadata (23 kB)\n", + "#26 5.101 Collecting python-on-whales>=0.77.0 (from holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 5.106 Downloading python_on_whales-0.79.0-py3-none-any.whl.metadata (18 kB)\n", + "#26 5.181 Collecting pyyaml<7.0,>=6.0 (from holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 5.184 Downloading pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (2.4 kB)\n", + "#26 5.250 Collecting requests<3.0,>=2.32 (from holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 5.255 Downloading requests-2.32.5-py3-none-any.whl.metadata (4.9 kB)\n", + "#26 5.274 Requirement already satisfied: MarkupSafe>=2.0 in /home/holoscan/.local/lib/python3.12/site-packages (from Jinja2<4.0.0,>=3.1.6->holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty) (3.0.3)\n", + "#26 5.376 Collecting charset_normalizer<4,>=2 (from requests<3.0,>=2.32->holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 5.385 Downloading charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (37 kB)\n", + "#26 5.633 Collecting pydantic!=2.0.*,<3,>=2 (from python-on-whales>=0.77.0->holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 5.637 Downloading pydantic-2.12.3-py3-none-any.whl.metadata (87 kB)\n", + "#26 5.682 Collecting annotated-types>=0.6.0 (from pydantic!=2.0.*,<3,>=2->python-on-whales>=0.77.0->holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 5.687 Downloading annotated_types-0.7.0-py3-none-any.whl.metadata (15 kB)\n", + "#26 6.424 Collecting pydantic-core==2.41.4 (from pydantic!=2.0.*,<3,>=2->python-on-whales>=0.77.0->holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 6.428 Downloading pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (7.3 kB)\n", + "#26 6.469 Collecting typing-inspection>=0.4.2 (from pydantic!=2.0.*,<3,>=2->python-on-whales>=0.77.0->holoscan-cli->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 6.476 Downloading typing_inspection-0.4.2-py3-none-any.whl.metadata (2.6 kB)\n", + "#26 6.543 Collecting cloudpickle<4.0,>=3.0 (from holoscan-cu12->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 6.548 Downloading cloudpickle-3.1.1-py3-none-any.whl.metadata (7.1 kB)\n", + "#26 6.566 Requirement already satisfied: pillow>=11.2 in /home/holoscan/.local/lib/python3.12/site-packages (from holoscan-cu12->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty) (12.0.0)\n", + "#26 6.610 Collecting cupy-cuda12x<14.0,>=12.2 (from holoscan-cu12->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 6.615 Downloading cupy_cuda12x-13.6.0-cp312-cp312-manylinux2014_x86_64.whl.metadata (2.4 kB)\n", + "#26 6.660 Collecting wheel-axle-runtime<1.0 (from holoscan-cu12->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 6.667 Downloading wheel_axle_runtime-0.0.7-py3-none-any.whl.metadata (8.3 kB)\n", + "#26 6.750 Collecting 
fastrlock>=0.5 (from cupy-cuda12x<14.0,>=12.2->holoscan-cu12->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 6.755 Downloading fastrlock-0.8.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl.metadata (7.7 kB)\n", + "#26 6.776 Requirement already satisfied: filelock in /home/holoscan/.local/lib/python3.12/site-packages (from wheel-axle-runtime<1.0->holoscan-cu12->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty) (3.20.0)\n", + "#26 6.894 Collecting setuptools>=75.8.2 (from zope.event->gevent->geventhttpclient>=2.3.3->tritonclient[all]>=2.53.0->monai-deploy-app-sdk==1.0.0+48.gfd5999e.dirty)\n", + "#26 6.898 Downloading setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n", + "#26 6.929 Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n", + "#26 6.953 Downloading tritonclient-2.61.0-py3-none-manylinux1_x86_64.whl (111 kB)\n", + "#26 6.977 Downloading python_rapidjson-1.22-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl (1.7 MB)\n", + "#26 7.022 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.7/1.7 MB 38.0 MB/s 0:00:00\n", + "#26 7.029 Downloading aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (1.8 MB)\n", + "#26 7.077 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.8/1.8 MB 36.1 MB/s 0:00:00\n", + "#26 7.084 Downloading grpcio-1.67.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n", + "#26 7.271 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.9/5.9 MB 31.3 MB/s 0:00:00\n", + "#26 7.280 Downloading multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (256 kB)\n", + "#26 7.310 Downloading protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl (319 kB)\n", + "#26 7.378 Downloading yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (377 kB)\n", + "#26 7.406 Downloading aiohappyeyeballs-2.6.1-py3-none-any.whl (15 kB)\n", + "#26 7.427 Downloading aiosignal-1.4.0-py3-none-any.whl (7.5 kB)\n", + "#26 7.451 Downloading attrs-25.4.0-py3-none-any.whl (67 kB)\n", + "#26 7.475 Downloading frozenlist-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (242 kB)\n", + "#26 7.501 Downloading geventhttpclient-2.3.5-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (114 kB)\n", + "#26 7.524 Downloading idna-3.11-py3-none-any.whl (71 kB)\n", + "#26 7.548 Downloading propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (221 kB)\n", + "#26 7.574 Downloading typeguard-4.4.4-py3-none-any.whl (34 kB)\n", + "#26 7.605 Downloading urllib3-2.5.0-py3-none-any.whl (129 kB)\n", + "#26 7.627 Downloading Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.9 MB)\n", + "#26 7.691 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.9/2.9 MB 47.1 MB/s 0:00:00\n", + "#26 7.698 Downloading certifi-2025.10.5-py3-none-any.whl (163 kB)\n", + "#26 7.723 Downloading cuda_python-13.0.3-py3-none-any.whl (7.6 kB)\n", + "#26 7.750 Downloading cuda_bindings-13.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl (12.1 MB)\n", + "#26 7.970 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 12.1/12.1 MB 55.6 MB/s 0:00:00\n", + "#26 7.976 Downloading cuda_pathfinder-1.3.2-py3-none-any.whl (27 kB)\n", + "#26 7.998 Downloading gevent-25.9.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (2.1 MB)\n", + "#26 8.045 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.1/2.1 MB 45.2 MB/s 0:00:00\n", + "#26 8.051 Downloading 
greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl (607 kB)\n", + "#26 8.079 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 607.6/607.6 kB 15.3 MB/s 0:00:00\n", + "#26 8.087 Downloading holoscan_cli-3.7.0-py3-none-any.whl (77 kB)\n", + "#26 8.109 Downloading psutil-7.1.2-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl (258 kB)\n", + "#26 8.135 Downloading pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (807 kB)\n", + "#26 8.166 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 807.9/807.9 kB 21.5 MB/s 0:00:00\n", + "#26 8.173 Downloading requests-2.32.5-py3-none-any.whl (64 kB)\n", + "#26 8.197 Downloading charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (153 kB)\n", + "#26 8.225 Downloading python_on_whales-0.79.0-py3-none-any.whl (118 kB)\n", + "#26 8.251 Downloading pydantic-2.12.3-py3-none-any.whl (462 kB)\n", + "#26 8.283 Downloading pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.1 MB)\n", + "#26 8.330 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.1/2.1 MB 44.8 MB/s 0:00:00\n", + "#26 8.337 Downloading annotated_types-0.7.0-py3-none-any.whl (13 kB)\n", + "#26 8.359 Downloading typing_inspection-0.4.2-py3-none-any.whl (14 kB)\n", + "#26 8.381 Downloading holoscan_cu12-3.7.0-cp312-cp312-manylinux_2_35_x86_64.whl (40.7 MB)\n", + "#26 9.287 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40.7/40.7 MB 45.0 MB/s 0:00:00\n", + "#26 9.291 Downloading cloudpickle-3.1.1-py3-none-any.whl (20 kB)\n", + "#26 9.315 Downloading cupy_cuda12x-13.6.0-cp312-cp312-manylinux2014_x86_64.whl (112.9 MB)\n", + "#26 11.44 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 112.9/112.9 MB 53.1 MB/s 0:00:02\n", + "#26 11.45 Downloading wheel_axle_runtime-0.0.7-py3-none-any.whl (14 kB)\n", + "#26 11.47 Downloading fastrlock-0.8.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl (53 kB)\n", + "#26 11.49 Downloading perf_analyzer-2.59.1-py3-none-manylinux_2_38_x86_64.whl (7.2 MB)\n", + "#26 11.80 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 7.2/7.2 MB 23.1 MB/s 0:00:00\n", + "#26 11.80 Downloading zope_event-6.0-py3-none-any.whl (6.4 kB)\n", + "#26 11.82 Downloading setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n", + "#26 11.86 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 32.5 MB/s 0:00:00\n", + "#26 11.87 Downloading zope_interface-8.0.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl (264 kB)\n", + "#26 12.51 Installing collected packages: perf-analyzer, fastrlock, brotli, zope.interface, wheel-axle-runtime, urllib3, typing-inspection, typeguard, setuptools, pyyaml, python-rapidjson, pydantic-core, psutil, protobuf, propcache, multidict, idna, grpcio, greenlet, frozenlist, cupy-cuda12x, cuda-pathfinder, colorama, cloudpickle, charset_normalizer, certifi, attrs, annotated-types, aiohappyeyeballs, zope.event, yarl, tritonclient, requests, pydantic, holoscan-cu12, cuda-bindings, aiosignal, python-on-whales, gevent, cuda-python, aiohttp, holoscan-cli, geventhttpclient, monai-deploy-app-sdk\n", + "#26 19.38 \n", + "#26 19.40 Successfully installed aiohappyeyeballs-2.6.1 aiohttp-3.13.2 aiosignal-1.4.0 annotated-types-0.7.0 attrs-25.4.0 brotli-1.1.0 certifi-2025.10.5 charset_normalizer-3.4.4 cloudpickle-3.1.1 colorama-0.4.6 cuda-bindings-13.0.3 cuda-pathfinder-1.3.2 cuda-python-13.0.3 cupy-cuda12x-13.6.0 fastrlock-0.8.3 frozenlist-1.8.0 gevent-25.9.1 geventhttpclient-2.3.5 greenlet-3.2.4 
grpcio-1.67.1 holoscan-cli-3.7.0 holoscan-cu12-3.7.0 idna-3.11 monai-deploy-app-sdk-1.0.0+48.gfd5999e.dirty multidict-6.7.0 perf-analyzer-2.59.1 propcache-0.4.1 protobuf-5.29.5 psutil-7.1.2 pydantic-2.12.3 pydantic-core-2.41.4 python-on-whales-0.79.0 python-rapidjson-1.22 pyyaml-6.0.3 requests-2.32.5 setuptools-80.9.0 tritonclient-2.61.0 typeguard-4.4.4 typing-inspection-0.4.2 urllib3-2.5.0 wheel-axle-runtime-0.0.7 yarl-1.22.0 zope.event-6.0 zope.interface-8.0.1\n",
 + "#26 DONE 24.0s\n",
 + "\n",
 + "#27 [release 18/22] COPY ./models /opt/holoscan/models\n",
 + "#27 DONE 0.3s\n",
 + "\n",
 + "#28 [release 19/22] COPY ./map/app.json /etc/holoscan/app.json\n",
 + "#28 DONE 0.1s\n",
 + "\n",
 + "#29 [release 20/22] COPY ./app.config /var/holoscan/app.yaml\n",
 + "#29 DONE 0.1s\n",
 + "\n",
 + "#30 [release 21/22] COPY ./map/pkg.json /etc/holoscan/pkg.json\n",
 + "#30 DONE 0.0s\n",
 + "\n",
 + "#31 [release 22/22] COPY ./app /opt/holoscan/app\n",
 + "#31 DONE 0.1s\n",
 + "\n",
 + "#32 exporting to docker image format\n",
 + "#32 exporting layers\n",
 + "#32 exporting layers 221.5s done\n",
 + "#32 exporting manifest sha256:0a8e962a77fecbccb2355fea6e36ce945cc5ef31532adf41f2983330e352a38c 0.0s done\n",
 + "#32 exporting config sha256:091c1b0dabf9b14f31cd49c2099ae82793a38b59decac84678aebfb60bd1f2c0 0.0s done\n",
 + "#32 sending tarball\n",
 + "#32 ...\n",
 + "\n",
 + "#33 importing to docker\n",
 + "#33 loading layer 107cbdaeec04 327.68kB / 29.72MB\n",
 + "#33 loading layer 5b4d734793bb 65.54kB / 4.55MB\n",
 + "#33 loading layer d476e855ba3c 557.06kB / 105.07MB\n",
 + "#33 loading layer 67f0dd5c7f3f 186B / 186B\n",
 + "#33 loading layer 5a78bf589884 6.88kB / 6.88kB\n",
 + "#33 loading layer 4fc6b425ed47 557.06kB / 1.51GB\n",
 + "#33 loading layer 905bb691d828 32.77kB / 59.61kB\n",
 + "#33 loading layer 5e896433c89f 1.68kB / 1.68kB\n",
 + "#33 loading layer d9d614cc706c 1.52kB / 1.52kB\n",
 + "#33 loading layer 2e82ae164bf2 32.77kB / 2.58MB\n",
 + "#33 loading layer d2a2631ea504 200B / 200B\n",
 + "#33 loading layer c3812e2eef35 425.98kB / 40.21MB\n",
 + "#33 loading layer 7ae752670ba9 229.38kB / 20.86MB\n",
 + "#33 loading layer b1dd5d5e6dd7 1.30kB / 1.30kB\n",
 + "#33 loading layer 12bc547cb353 612B / 612B\n",
 + "#33 loading layer 0ced5c112476 3.37kB / 3.37kB\n",
 + "#33 loading layer 35d0c6af7304 156B / 156B\n",
 + "#33 loading layer 5f70bf18a086 32B / 32B\n",
 + "#33 loading layer 1a70ec7b5f95 3.60kB / 3.60kB\n",
 + "#33 loading layer 7883affb98a9 3.60kB / 3.60kB\n",
 + "#33 loading layer 232470ddca8c 197B / 197B\n",
 + "#33 loading layer 15de3fa4006e 252B / 252B\n",
 + "#33 loading layer 89c05ac10d6a 65.54kB / 5.38MB\n",
 + "#33 loading layer 263e6f285311 557.06kB / 4.23GB\n",
 + "#33 loading layer 0cf1cce37748 32.77kB / 141.77kB\n",
 + "#33 loading layer afd31ee4b88a 557.06kB / 405.66MB\n",
 + "#33 loading layer 72b5457169d1 196.61kB / 17.81MB\n",
 + "#33 loading layer 365947273ac7 483B / 483B\n",
 + "#33 loading layer 8b08f0cd86e6 311B / 311B\n",
 + "#33 loading layer 724079108c2a 298B / 298B\n",
 + "#33 loading layer 7745de7f2569 3.90kB / 3.90kB\n",
 + "#33 loading layer 2e82ae164bf2 2.58MB / 2.58MB 127.8s done\n",
 + "#33 loading layer 107cbdaeec04 29.72MB / 29.72MB 164.2s done\n",
 + "#33 loading layer 5b4d734793bb 4.55MB / 4.55MB 162.2s done\n",
 + "#33 loading layer d476e855ba3c 105.07MB / 105.07MB 161.6s done\n",
 + "#33 loading layer 67f0dd5c7f3f 186B / 186B 157.4s done\n",
 + "#33 loading layer 5a78bf589884 6.88kB / 6.88kB 156.7s done\n",
 + "#33 loading layer 4fc6b425ed47 1.51GB / 1.51GB 155.8s done\n",
 + "#33 loading layer 905bb691d828 59.61kB / 59.61kB 129.5s done\n",
 + "#33 loading layer 5e896433c89f 1.68kB / 1.68kB 128.9s done\n",
 + "#33 loading layer d9d614cc706c 1.52kB / 1.52kB 128.4s done\n",
 + "#33 loading layer d2a2631ea504 200B / 200B 127.3s done\n",
 + "#33 loading layer c3812e2eef35 40.21MB / 40.21MB 126.5s done\n",
 + "#33 loading layer 7ae752670ba9 20.86MB / 20.86MB 125.2s done\n",
 + "#33 loading layer b1dd5d5e6dd7 1.30kB / 1.30kB 123.1s done\n",
 + "#33 loading layer 12bc547cb353 612B / 612B 123.1s done\n",
 + "#33 loading layer 0ced5c112476 3.37kB / 3.37kB 123.0s done\n",
 + "#33 loading layer 35d0c6af7304 156B / 156B 122.9s done\n",
 + "#33 loading layer 5f70bf18a086 32B / 32B 122.9s done\n",
 + "#33 loading layer 1a70ec7b5f95 3.60kB / 3.60kB 122.8s done\n",
 + "#33 loading layer 7883affb98a9 3.60kB / 3.60kB 122.8s done\n",
 + "#33 loading layer 232470ddca8c 197B / 197B 122.7s done\n",
 + "#33 loading layer 15de3fa4006e 252B / 252B 122.6s done\n",
 + "#33 loading layer 89c05ac10d6a 5.38MB / 5.38MB 122.5s done\n",
 + "#33 loading layer 263e6f285311 4.23GB / 4.23GB 121.9s done\n",
 + "#33 loading layer 0cf1cce37748 141.77kB / 141.77kB 19.6s done\n",
 + "#33 loading layer afd31ee4b88a 
405.66MB / 405.66MB 19.1s done\n", + "#33 loading layer 72b5457169d1 17.81MB / 17.81MB 0.9s done\n", + "#33 loading layer 365947273ac7 483B / 483B 0.7s done\n", + "#33 loading layer 8b08f0cd86e6 311B / 311B 0.6s done\n", + "#33 loading layer 724079108c2a 298B / 298B 0.5s done\n", + "#33 loading layer 7745de7f2569 3.90kB / 3.90kB 0.5s done\n", + "#33 DONE 164.2s\n", + "\n", + "#32 exporting to docker image format\n", + "#32 sending tarball 226.2s done\n", + "#32 DONE 447.7s\n", + "\n", + "#34 exporting cache to client directory\n", + "#34 preparing build cache for export\n", + "#34 writing layer sha256:01dbb24602a6492bfd4e66a528f13460639f4379641f6d5d9a5764fd8b929a33\n", + "#34 writing layer sha256:01dbb24602a6492bfd4e66a528f13460639f4379641f6d5d9a5764fd8b929a33 0.1s done\n", + "#34 writing layer sha256:056e5fea9ff576db67854c0f98fa42ec5b27b7a084c4185a4f4e7637445640df\n", + "#34 writing layer sha256:056e5fea9ff576db67854c0f98fa42ec5b27b7a084c4185a4f4e7637445640df 9.0s done\n", + "#34 writing layer sha256:07570fd93aa2f5551f918cd92030d4f6f49e9c891d25901554c80136a4455b63\n", + "#34 writing layer sha256:07570fd93aa2f5551f918cd92030d4f6f49e9c891d25901554c80136a4455b63 0.5s done\n", + "#34 writing layer sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651\n", + "#34 writing layer sha256:0acb0bb33f9956b78fbfc026a81d9f3fbcf52f6c3c51ed7ff503b2f5db52d651 2.6s done\n", + "#34 writing layer sha256:13e8f87efde86df96bfe73da211eb196d0416702b69d92947ec617138e6db64b\n", + "#34 writing layer sha256:13e8f87efde86df96bfe73da211eb196d0416702b69d92947ec617138e6db64b 0.0s done\n", + "#34 writing layer sha256:187325367188ecee39028e06ba38d4643146198b0c73ed22276a3a95c6d5a056 0.0s done\n", + "#34 writing layer sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338\n", + "#34 writing layer sha256:1ba07b1309cf3cbf6f4649e357d9a21e94039b6100973ef20599eb4a11a8b338 28.9s done\n", + "#34 writing layer sha256:1d0d0ae70c2710c2734923b3fa133474464425b01045082fb3aa0a48ea6c8817\n", + "#34 writing layer sha256:1d0d0ae70c2710c2734923b3fa133474464425b01045082fb3aa0a48ea6c8817 78.8s done\n", + "#34 writing layer sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927\n", + "#34 writing layer sha256:32f112e3802cadcab3543160f4d2aa607b3cc1c62140d57b4f5441384f40e927 0.7s done\n", + "#34 writing layer sha256:350975150fe900917041cf4f051e534aa21b2e19a4117e7f5466605130aea18e\n", + "#34 writing layer sha256:350975150fe900917041cf4f051e534aa21b2e19a4117e7f5466605130aea18e 0.0s done\n", + "#34 writing layer sha256:3cbe008dab298e1242e9a82e7095428a4063a7938cd244b877a8fe6e2a926803 0.0s done\n", + "#34 writing layer sha256:43846f52d4e77633dd2172b60046ec25ccec47d34a5e2ad7a4dcf1c4d9d2c188 0.0s done\n", + "#34 writing layer sha256:492db7b3e492442f7a1ad30fea534f61ad89da451c675ccab2488e41034d0886 0.0s done\n", + "#34 writing layer sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1\n", + "#34 writing layer sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1 done\n", + "#34 writing layer sha256:5aafa836c821b153fce2c5357ac5601ce0f4195675ed0aeea8f611b3b9cd7f27 0.1s done\n", + "#34 writing layer sha256:69cf5c7e491e16af81cfc9508eb14f87019049bf3341b20b819a8dcd70b78a8d 0.0s done\n", + "#34 writing layer sha256:6ba06822853bad16ce86072266460ad2363e31641ff5e85be04803787a8aa695\n", + "#34 writing layer sha256:6ba06822853bad16ce86072266460ad2363e31641ff5e85be04803787a8aa695 0.0s done\n", + "#34 writing layer 
sha256:74ff783cfc1660e3f3e1e81075a25de97b07fad907ca77509ba507ea1d5edf2e 0.0s done\n", + "#34 writing layer sha256:81a64790a2ca84cdc6b783fe2af8820ae968d692b68518541b48d405a730fad8 0.0s done\n", + "#34 writing layer sha256:84fef9f1ca4f21e9c7411db3c57fe91a1f401d7051d87a3bfed97ff70a2cf72c 0.0s done\n", + "#34 writing layer sha256:910884cfc2f77acaf46c98cd83f883dc31c12a54e3b54a3fceb7a71af0ae890e\n", + "#34 writing layer sha256:910884cfc2f77acaf46c98cd83f883dc31c12a54e3b54a3fceb7a71af0ae890e 0.9s done\n", + "#34 writing layer sha256:932162d4fcf6e1094ee1544e8fde0ae2a02b2c4e9545f64f373ce3a4479189e6\n", + "#34 writing layer sha256:932162d4fcf6e1094ee1544e8fde0ae2a02b2c4e9545f64f373ce3a4479189e6 0.0s done\n", + "#34 writing layer sha256:96bf0dfae02f8c2f168036610f3d5c148096280e7a5b8d3cf50d454f5b2d5b67 0.0s done\n", + "#34 writing layer sha256:9b92275482332aca5b3ab10c2996c40fa2e69fdbb8b2c6f8f38ec3f919ca41e7\n", + "#34 writing layer sha256:9b92275482332aca5b3ab10c2996c40fa2e69fdbb8b2c6f8f38ec3f919ca41e7 0.4s done\n", + "#34 writing layer sha256:9c9b39ad83d512d5af47e9c22f4458cb586f05ea478656a372c5e739cb7280e5\n", + "#34 writing layer sha256:9c9b39ad83d512d5af47e9c22f4458cb586f05ea478656a372c5e739cb7280e5 0.1s done\n", + "#34 writing layer sha256:a05e2a2b3985056fa77853b6928b3370ccad98c8497462777f5d434477f63df6\n", + "#34 writing layer sha256:a05e2a2b3985056fa77853b6928b3370ccad98c8497462777f5d434477f63df6 0.0s done\n", + "#34 writing layer sha256:a1873c23f4f8b5ce90ce14f597e2d9cb97ae53621e8a59cbe73febca4f098653 0.0s done\n", + "#34 writing layer sha256:d275366bd0805885c41fbe61b6361f7bbc2f0848c4305e19acd2d6c32d1a0a3e\n", + "#34 writing layer sha256:d275366bd0805885c41fbe61b6361f7bbc2f0848c4305e19acd2d6c32d1a0a3e 0.5s done\n", + "#34 writing layer sha256:d561590878ab411042992f7e4568bc267aa23391d3869a79bd4bf0e52f877254\n", + "#34 preparing build cache for export 123.4s done\n", + "#34 writing layer sha256:d561590878ab411042992f7e4568bc267aa23391d3869a79bd4bf0e52f877254 0.0s done\n", + "#34 writing layer sha256:ddc61996788ff6833bbe82138d6fc5000e848953b90df5055cbae21479218914 0.0s done\n", + "#34 writing layer sha256:f13cbeb7d0a63570c41d3556d86c99fd936dc9cfecd19aaf79987386928db773 0.0s done\n", + "#34 writing config sha256:d32f6b23fadf73ef78f9192fbe3cfcf27a3e823e04973f14d926160b33a97252 0.0s done\n", + "#34 writing cache manifest sha256:58127de9bdb08ce0994a9eb1dd8a7da5295a88147f7ebd7e2902bba12b9b5052 0.0s done\n", + "#34 DONE 123.4s\n", + "[2025-10-30 12:24:15,847] [INFO] (packager) - Build Summary:\n", + "\n", + "Platform: x64-workstation/dgpu\n", + " Status: Succeeded\n", + " Docker Tag: my_app-x64-workstation-dgpu-linux-amd64:1.0\n", + " Tarball: None\n" ] } ], @@ -1559,7 +2807,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "my_app-x64-workstation-dgpu-linux-amd64 1.0 cfc03ea74fd7 3 hours ago 11.5GB\n" + "my_app-x64-workstation-dgpu-linux-amd64 1.0 091c1b0dabf9 9 minutes ago 11GB\n" ] } ], @@ -1587,23 +2835,23 @@ "text": [ "output\n", "dcm\n", - "[2025-09-30 18:30:02,980] [INFO] (runner) - Checking dependencies...\n", - "[2025-09-30 18:30:02,980] [INFO] (runner) - --> Verifying if \"docker\" is installed...\n", + "[2025-10-30 12:24:18,309] [INFO] (runner) - Checking dependencies...\n", + "[2025-10-30 12:24:18,309] [INFO] (runner) - --> Verifying if \"docker\" is installed...\n", "\n", - "[2025-09-30 18:30:02,980] [INFO] (runner) - --> Verifying if \"docker-buildx\" is installed...\n", + "[2025-10-30 12:24:18,309] [INFO] (runner) - --> Verifying if \"docker-buildx\" is installed...\n", "\n", - 
"[2025-09-30 18:30:02,980] [INFO] (runner) - --> Verifying if \"my_app-x64-workstation-dgpu-linux-amd64:1.0\" is available...\n", + "[2025-10-30 12:24:18,310] [INFO] (runner) - --> Verifying if \"my_app-x64-workstation-dgpu-linux-amd64:1.0\" is available...\n", "\n", - "[2025-09-30 18:30:03,070] [INFO] (runner) - Reading HAP/MAP manifest...\n", - "Successfully copied 2.56kB to /tmp/tmpjksfr5pj/app.json\n", - "Successfully copied 2.05kB to /tmp/tmpjksfr5pj/pkg.json\n", - "3cfb3be292dc85cd2f35ccae1b853a86561ca60f369886ff6fdad88c5d6366e6\n", - "[2025-09-30 18:30:03,608] [INFO] (runner) - --> Verifying if \"nvidia-ctk\" is installed...\n", + "[2025-10-30 12:24:18,430] [INFO] (runner) - Reading HAP/MAP manifest...\n", + "Successfully copied 2.56kB to /tmp/tmpo9i84mva/app.json\n", + "Successfully copied 2.05kB to /tmp/tmpo9i84mva/pkg.json\n", + "7de2dd97da25b2da6ec714dc954b9bdd07b00af669c9f14b58c0605681673d11\n", + "[2025-10-30 12:24:19,131] [INFO] (runner) - --> Verifying if \"nvidia-ctk\" is installed...\n", "\n", - "[2025-09-30 18:30:03,608] [INFO] (runner) - --> Verifying \"nvidia-ctk\" version...\n", + "[2025-10-30 12:24:19,132] [INFO] (runner) - --> Verifying \"nvidia-ctk\" version...\n", "\n", - "[2025-09-30 18:30:04,004] [INFO] (common) - Launching container (6f2868245795) using image 'my_app-x64-workstation-dgpu-linux-amd64:1.0'...\n", - " container name: agitated_fermat\n", + "[2025-10-30 12:24:19,830] [INFO] (common) - Launching container (68cbb8952837) using image 'my_app-x64-workstation-dgpu-linux-amd64:1.0'...\n", + " container name: agitated_wozniak\n", " host name: mingq-dt\n", " network: host\n", " user: 1000:1000\n", @@ -1613,7 +2861,7 @@ " shared memory size: 67108864\n", " devices: \n", " group_add: 44\n", - "2025-10-01 01:30:05 [INFO] Launching application python3 /opt/holoscan/app ...\n", + "2025-10-30 19:24:20 [INFO] Launching application python3 /opt/holoscan/app ...\n", "\n", "/home/holoscan/.local/lib/python3.12/site-packages/monai/deploy/utils/importutil.py:20: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. 
Refrain from using this package or pin to Setuptools<81.\n", "\n", @@ -1623,11 +2871,11 @@ "\n", "[info] [gxf_executor.cpp:344] Creating context\n", "\n", - "[2025-10-01 01:30:11,177] [INFO] (root) - Parsed args: Namespace(log_level=None, input=None, output=None, model=None, workdir=None, triton_server_netloc=None, argv=['/opt/holoscan/app'])\n", + "[2025-10-30 19:24:24,662] [INFO] (root) - Parsed args: Namespace(log_level=None, input=None, output=None, model=None, workdir=None, triton_server_netloc=None, argv=['/opt/holoscan/app'])\n", "\n", - "[2025-10-01 01:30:11,184] [INFO] (root) - AppContext object: AppContext(input_path=/var/holoscan/input, output_path=/var/holoscan/output, model_path=/opt/holoscan/models, workdir=/var/holoscan), triton_server_netloc=\n", + "[2025-10-30 19:24:24,664] [INFO] (root) - AppContext object: AppContext(input_path=/var/holoscan/input, output_path=/var/holoscan/output, model_path=/opt/holoscan/models, workdir=/var/holoscan), triton_server_netloc=\n", "\n", - "[2025-10-01 01:30:11,184] [INFO] (app.AISpleenSegApp) - App input and output path: /var/holoscan/input, /var/holoscan/output\n", + "[2025-10-30 19:24:24,664] [INFO] (app.AISpleenSegApp) - App input and output path: /var/holoscan/input, /var/holoscan/output\n", "\n", "[info] [gxf_executor.cpp:2508] Activating Graph...\n", "\n", @@ -1637,75 +2885,75 @@ "\n", "[info] [greedy_scheduler.cpp:191] Scheduling 5 entities\n", "\n", - "[2025-10-01 01:30:11,339] [INFO] (monai.deploy.operators.dicom_data_loader_operator.DICOMDataLoaderOperator) - No or invalid input path from the optional input port: None\n", + "[2025-10-30 19:24:24,818] [INFO] (monai.deploy.operators.dicom_data_loader_operator.DICOMDataLoaderOperator) - No or invalid input path from the optional input port: None\n", "\n", - "[2025-10-01 01:30:11,663] [INFO] (root) - Finding series for Selection named: CT Series\n", + "[2025-10-30 19:24:25,629] [INFO] (root) - Finding series for Selection named: CT Series\n", "\n", - "[2025-10-01 01:30:11,663] [INFO] (root) - Searching study, : 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291\n", + "[2025-10-30 19:24:25,629] [INFO] (root) - Searching study, : 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291\n", "\n", " # of series: 1\n", "\n", - "[2025-10-01 01:30:11,663] [INFO] (root) - Working on series, instance UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 19:24:25,629] [INFO] (root) - Working on series, instance UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", "\n", - "[2025-10-01 01:30:11,663] [INFO] (root) - On attribute: 'StudyDescription' to match value: '(.*?)'\n", + "[2025-10-30 19:24:25,629] [INFO] (root) - On attribute: 'StudyDescription' to match value: '(.*?)'\n", "\n", - "[2025-10-01 01:30:11,663] [INFO] (root) - Series attribute StudyDescription value: CT ABDOMEN W IV CONTRAST\n", + "[2025-10-30 19:24:25,629] [INFO] (root) - Series attribute StudyDescription value: CT ABDOMEN W IV CONTRAST\n", "\n", - "[2025-10-01 01:30:11,663] [INFO] (root) - On attribute: 'Modality' to match value: '(?i)CT'\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - On attribute: 'Modality' to match value: '(?i)CT'\n", "\n", - "[2025-10-01 01:30:11,663] [INFO] (root) - Series attribute Modality value: CT\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - Series attribute Modality value: CT\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - On attribute: 'SeriesDescription' to match value: '(.*?)'\n", + "[2025-10-30 
19:24:25,630] [INFO] (root) - On attribute: 'SeriesDescription' to match value: '(.*?)'\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - Series attribute SeriesDescription value: ABD/PANC 3.0 B31f\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - Series attribute SeriesDescription value: ABD/PANC 3.0 B31f\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - On attribute: 'ImageType' to match value: ['PRIMARY', 'ORIGINAL']\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - On attribute: 'ImageType' to match value: ['PRIMARY', 'ORIGINAL']\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - Series attribute ImageType value: None\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - Series attribute ImageType value: None\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - Instance level attribute ImageType value: [\"['ORIGINAL', 'PRIMARY', 'AXIAL', 'CT_SOM5 SPI']\"]\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - Instance level attribute ImageType value: [\"['ORIGINAL', 'PRIMARY', 'AXIAL', 'CT_SOM5 SPI']\"]\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - Selected Series, UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - Selected Series, UID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - Series Selection finalized\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - Series Selection finalized\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - Series Description of selected DICOM Series for inference: ABD/PANC 3.0 B31f\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - Series Description of selected DICOM Series for inference: ABD/PANC 3.0 B31f\n", "\n", - "[2025-10-01 01:30:11,664] [INFO] (root) - Series Instance UID of selected DICOM Series for inference: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", + "[2025-10-30 19:24:25,630] [INFO] (root) - Series Instance UID of selected DICOM Series for inference: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239\n", "\n", "/home/holoscan/.local/lib/python3.12/site-packages/monai/utils/deprecate_utils.py:321: FutureWarning: monai.transforms.spatial.dictionary Orientationd.__init__:labels: Current default value of argument `labels=(('L', 'R'), ('P', 'A'), ('I', 'S'))` was changed in version None from `labels=(('L', 'R'), ('P', 'A'), ('I', 'S'))` to `labels=None`. 
Default value changed to None meaning that the transform now uses the 'space' of a meta-tensor, if applicable, to determine appropriate axis labels.\n", "\n", " warn_deprecated(argname, msg, warning_category)\n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Converted Image object metadata:\n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Converted Image object metadata:\n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239, type \n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.119403521930927333027265674239, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDate: 20090831, type \n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDate: 20090831, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesTime: 101721.452, type \n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesTime: 101721.452, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Modality: CT, type \n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Modality: CT, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDescription: ABD/PANC 3.0 B31f, type \n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesDescription: ABD/PANC 3.0 B31f, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - PatientPosition: HFS, type \n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - PatientPosition: HFS, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesNumber: 8, type \n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - SeriesNumber: 8, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_pixel_spacing: 0.7890625, type \n", + "[2025-10-30 19:24:25,966] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_pixel_spacing: 0.7890625, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_pixel_spacing: 0.7890625, type \n", + "[2025-10-30 19:24:25,967] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_pixel_spacing: 0.7890625, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] 
(monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_pixel_spacing: 1.5, type \n", + "[2025-10-30 19:24:25,967] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_pixel_spacing: 1.5, type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_direction_cosine: [1.0, 0.0, 0.0], type \n", + "[2025-10-30 19:24:25,967] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - row_direction_cosine: [1.0, 0.0, 0.0], type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_direction_cosine: [0.0, 1.0, 0.0], type \n", + "[2025-10-30 19:24:25,967] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - col_direction_cosine: [0.0, 1.0, 0.0], type \n", "\n", - "[2025-10-01 01:30:12,202] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_direction_cosine: [0.0, 0.0, 1.0], type \n", + "[2025-10-30 19:24:25,967] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - depth_direction_cosine: [0.0, 0.0, 1.0], type \n", "\n", - "[2025-10-01 01:30:12,203] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - dicom_affine_transform: [[ 0.7890625 0. 0. -197.60547 ]\n", + "[2025-10-30 19:24:25,967] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - dicom_affine_transform: [[ 0.7890625 0. 0. -197.60547 ]\n", "\n", " [ 0. 0.7890625 0. -398.60547 ]\n", "\n", @@ -1713,7 +2961,7 @@ "\n", " [ 0. 0. 0. 1. ]], type \n", "\n", - "[2025-10-01 01:30:12,203] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - nifti_affine_transform: [[ -0.7890625 -0. -0. 197.60547 ]\n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - nifti_affine_transform: [[ -0.7890625 -0. -0. 197.60547 ]\n", "\n", " [ -0. -0.7890625 -0. 398.60547 ]\n", "\n", @@ -1721,67 +2969,75 @@ "\n", " [ 0. 0. 0. 1. 
]], type \n", "\n", - "[2025-10-01 01:30:12,204] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291, type \n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyInstanceUID: 1.3.6.1.4.1.14519.5.2.1.7085.2626.822645453932810382886582736291, type \n", + "\n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyID: , type \n", + "\n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDate: 20090831, type \n", "\n", - "[2025-10-01 01:30:12,204] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyID: , type \n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyTime: 095948.599, type \n", "\n", - "[2025-10-01 01:30:12,204] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDate: 20090831, type \n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDescription: CT ABDOMEN W IV CONTRAST, type \n", "\n", - "[2025-10-01 01:30:12,204] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyTime: 095948.599, type \n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - AccessionNumber: 5471978513296937, type \n", "\n", - "[2025-10-01 01:30:12,204] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - StudyDescription: CT ABDOMEN W IV CONTRAST, type \n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - selection_name: CT Series, type \n", "\n", - "[2025-10-01 01:30:12,204] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - AccessionNumber: 5471978513296937, type \n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - affine: [[ -0.7890625 -0. -0. 197.60547 ]\n", "\n", - "[2025-10-01 01:30:12,204] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - selection_name: CT Series, type \n", + " [ -0. -0.7890625 -0. 398.60547 ]\n", + "\n", + " [ 0. 0. 1.5 -383. ]\n", + "\n", + " [ 0. 0. 0. 1. 
]], type \n", "\n", - "[2025-10-01 01:30:12,204] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - space: LPS, type \n", + "[2025-10-30 19:24:25,968] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - space: RAS, type \n", "\n", - "2025-10-01 01:30:12,912 INFO image_writer.py:197 - writing: /var/holoscan/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626.nii\n", + "2025-10-30 19:24:26,606 INFO image_writer.py:197 - writing: /var/holoscan/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626.nii\n", "\n", - "[2025-10-01 01:30:15,137] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Input of shape: torch.Size([1, 1, 270, 270, 106])\n", + "[2025-10-30 19:24:28,296] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Input of shape: torch.Size([1, 1, 270, 270, 106])\n", "\n", - "/home/holoscan/.local/lib/python3.12/site-packages/monai/inferers/utils.py:226: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:306.)\n", + "/home/holoscan/.local/lib/python3.12/site-packages/monai/inferers/utils.py:226: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:345.)\n", "\n", " win_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n", "\n", - "/home/holoscan/.local/lib/python3.12/site-packages/monai/inferers/utils.py:370: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:306.)\n", + "/home/holoscan/.local/lib/python3.12/site-packages/monai/inferers/utils.py:370: UserWarning: Using a non-tuple sequence for multidimensional indexing is deprecated and will be changed in pytorch 2.9; use x[tuple(seq)] instead of x[seq]. 
In pytorch 2.9 this will be interpreted as tensor index, x[torch.tensor(seq)], which will result either in an error or a different result (Triggered internally at /pytorch/torch/csrc/autograd/python_variable_indexing.cpp:345.)\n", "\n", " out[idx_zm] += p\n", "\n", - "2025-10-01 01:30:16,851 INFO image_writer.py:197 - writing: /var/holoscan/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626_seg.nii\n", + "2025-10-30 19:24:29,704 INFO image_writer.py:197 - writing: /var/holoscan/output/saved_images_folder/1.3.6.1.4.1.14519.5.2.1.7085.2626/1.3.6.1.4.1.14519.5.2.1.7085.2626_seg.nii\n", "\n", - "[2025-10-01 01:30:18,415] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform length/batch size of output: 1\n", + "[2025-10-30 19:24:31,143] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform length/batch size of output: 1\n", "\n", - "[2025-10-01 01:30:18,418] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pixel spacings for pred: tensor([0.7891, 0.7891, 1.5000], dtype=torch.float64)\n", + "[2025-10-30 19:24:31,144] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pixel spacings for pred: tensor([0.7891, 0.7891, 1.5000], dtype=torch.float64)\n", "\n", - "[2025-10-01 01:30:18,546] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pred of shape: (1, 512, 512, 204)\n", + "[2025-10-30 19:24:31,270] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Post transform pred of shape: (1, 512, 512, 204)\n", "\n", - "[2025-10-01 01:30:18,583] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image numpy array of type shape: (204, 512, 512)\n", + "[2025-10-30 19:24:31,308] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image numpy array of type shape: (204, 512, 512)\n", "\n", - "[2025-10-01 01:30:18,587] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image pixel max value: 1\n", + "[2025-10-30 19:24:31,312] [INFO] (monai.deploy.operators.monai_seg_inference_operator.MonaiSegInferenceOperator) - Output Seg image pixel max value: 1\n", "\n", - "/home/holoscan/.local/lib/python3.12/site-packages/highdicom/base.py:165: UserWarning: The string \"C3N-00198\" is unlikely to represent the intended person name since it contains only a single component. Construct a person name according to the format in described in https://dicom.nema.org/dicom/2013/output/chtml/part05/sect_6.2.html#sect_6.2.1.2, or, in pydicom 2.2.0 or later, use the pydicom.valuerep.PersonName.from_named_components() method to construct the person name correctly. If a single-component name is really intended, add a trailing caret character to disambiguate the name.\n", + "/home/holoscan/.local/lib/python3.12/site-packages/highdicom/base.py:181: UserWarning: The string \"C3N-00198\" is unlikely to represent the intended person name since it contains only a single component. Construct a person name according to the format in described in https://dicom.nema.org/dicom/2013/output/chtml/part05/sect_6.2.html#sect_6.2.1.2, or, in pydicom 2.2.0 or later, use the pydicom.valuerep.PersonName.from_named_components() method to construct the person name correctly. 
If a single-component name is really intended, add a trailing caret character to disambiguate the name.\n", "\n", " check_person_name(patient_name)\n", "\n", - "[2025-10-01 01:30:19,670] [INFO] (highdicom.base) - copy Image-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 19:24:32,225] [INFO] (highdicom.base) - copy Image-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", "\n", - "[2025-10-01 01:30:19,670] [INFO] (highdicom.base) - copy attributes of module \"Specimen\"\n", + "[2025-10-30 19:24:32,226] [INFO] (highdicom.base) - copy attributes of module \"Specimen\"\n", "\n", - "[2025-10-01 01:30:19,670] [INFO] (highdicom.base) - copy Patient-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 19:24:32,226] [INFO] (highdicom.base) - copy Patient-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", "\n", - "[2025-10-01 01:30:19,670] [INFO] (highdicom.base) - copy attributes of module \"Patient\"\n", + "[2025-10-30 19:24:32,226] [INFO] (highdicom.base) - copy attributes of module \"Patient\"\n", "\n", - "[2025-10-01 01:30:19,671] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Subject\"\n", + "[2025-10-30 19:24:32,226] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Subject\"\n", "\n", - "[2025-10-01 01:30:19,671] [INFO] (highdicom.base) - copy Study-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", + "[2025-10-30 19:24:32,226] [INFO] (highdicom.base) - copy Study-related attributes from dataset \"1.3.6.1.4.1.14519.5.2.1.7085.2626.936983343951485811186213470191\"\n", "\n", - "[2025-10-01 01:30:19,671] [INFO] (highdicom.base) - copy attributes of module \"General Study\"\n", + "[2025-10-30 19:24:32,226] [INFO] (highdicom.base) - copy attributes of module \"General Study\"\n", "\n", - "[2025-10-01 01:30:19,671] [INFO] (highdicom.base) - copy attributes of module \"Patient Study\"\n", + "[2025-10-30 19:24:32,226] [INFO] (highdicom.base) - copy attributes of module \"Patient Study\"\n", "\n", - "[2025-10-01 01:30:19,671] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Study\"\n", + "[2025-10-30 19:24:32,226] [INFO] (highdicom.base) - copy attributes of module \"Clinical Trial Study\"\n", "\n", "[info] [greedy_scheduler.cpp:372] Scheduler stopped: Some entities are waiting for execution, but there are no periodic or async entities to get out of the deadlock.\n", "\n", @@ -1791,13 +3047,13 @@ "\n", "[info] [gxf_executor.cpp:2597] Graph execution finished.\n", "\n", - "[2025-10-01 01:30:19,799] [INFO] (app.AISpleenSegApp) - End run\n", + "[2025-10-30 19:24:32,303] [INFO] (app.AISpleenSegApp) - End run\n", "\n", "[info] [gxf_executor.cpp:379] Destroying context\n", "\n", - "2025-10-01 01:30:21 [INFO] Application exited with 0.\n", + "2025-10-30 19:24:33 [INFO] Application exited with 0.\n", "\n", - "[2025-09-30 18:30:22,528] [INFO] (common) - Container 'agitated_fermat'(6f2868245795) exited with code 0.\n" + "[2025-10-30 12:24:35,481] [INFO] (common) - Container 'agitated_wozniak'(68cbb8952837) exited with code 0.\n" ] } ], @@ -1819,7 +3075,7 @@ "output_type": "stream", "text": [ "output:\n", - "1.2.826.0.1.3680043.10.511.3.21261845401654068054816597008318808.dcm\n", + "1.2.826.0.1.3680043.10.511.3.79884456519089834616076829263540444.dcm\n", 
"saved_images_folder\n", "\n", "output/saved_images_folder:\n", diff --git a/platforms/nuance_pin/app/inference.py b/platforms/nuance_pin/app/inference.py index 908f16a2..1d805e39 100644 --- a/platforms/nuance_pin/app/inference.py +++ b/platforms/nuance_pin/app/inference.py @@ -181,7 +181,7 @@ def pre_process(self, img_reader) -> Compose: Spacingd(keys=image_key, pixdim=(0.703125, 0.703125, 1.25)), Orientationd( keys=image_key, - axcodes="LPS", + axcodes="RAS", ), EnsureChannelFirstd(keys=image_key), ScaleIntensityRanged(image_key, a_min=-1024.0, a_max=300.0, b_min=0.0, b_max=1.0, clip=True), diff --git a/requirements.txt b/requirements.txt index 365bc447..58f5faa0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ -holoscan<=3.5.0 -holoscan-cli<=3.5.0 +holoscan-cu12 +holoscan-cli numpy>=1.21.6 colorama>=0.4.1 tritonclient[all]>=2.53.0 diff --git a/run b/run index 4ef9e042..33cb0041 100755 --- a/run +++ b/run @@ -344,10 +344,10 @@ install_python_dev_deps() { # Copy the cuda runtime library to the fixed location (workaround for readthedocs) so that # we can leverage the existing LD_LIBRARY_PATH (configured by the readthedocs UI) to locate the cuda runtime library. # (LD_LIBRARY_PATH is set to /home/docs/ for that purpose) - # Note that 'python3.9' is hard-coded here, it should be updated if the Python version changes by + # Note that 'python3.10' is hard-coded here, it should be updated if the Python version changes by # .readthedocs.yml or other configurations. - run_command ls -al /home/docs/checkouts/readthedocs.org/user_builds/${READTHEDOCS_PROJECT}/envs/${READTHEDOCS_VERSION}/lib/python3.9/site-packages/nvidia/cuda_runtime/lib/ - run_command cp /home/docs/checkouts/readthedocs.org/user_builds/${READTHEDOCS_PROJECT}/envs/${READTHEDOCS_VERSION}/lib/python3.9/site-packages/nvidia/cuda_runtime/lib/*.so* /home/docs/ + run_command ls -al /home/docs/checkouts/readthedocs.org/user_builds/${READTHEDOCS_PROJECT}/envs/${READTHEDOCS_VERSION}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/ + run_command cp /home/docs/checkouts/readthedocs.org/user_builds/${READTHEDOCS_PROJECT}/envs/${READTHEDOCS_VERSION}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/*.so* /home/docs/ run_command ls -al /home/docs/ fi } diff --git a/setup.cfg b/setup.cfg index 6413b321..8838944f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,7 +16,7 @@ project_urls = Source Code=https://github.com/Project-MONAI/monai-deploy-app-sdk [options] -python_requires = >=3.9, <3.14 +python_requires = >=3.10, <3.14 # for compiling and develop setup only # no need to specify the versions so that we could # compile for multiple targeted versions. 
@@ -24,8 +24,8 @@ python_requires = >=3.9, <3.14
 # cucim
 install_requires =
     numpy>=1.21.6
-    holoscan<=3.5.0
-    holoscan-cli<=3.5.0
+    holoscan-cu12
+    holoscan-cli
     colorama>=0.4.1
     tritonclient[all]>=2.53.0
     typeguard>=3.0.0

From 0f6b5fddfeca89900598cf1eabfd0449004a9073 Mon Sep 17 00:00:00 2001
From: Ming M Qin <38891913+MMelQin@users.noreply.github.com>
Date: Thu, 30 Oct 2025 18:32:05 -0700
Subject: [PATCH 19/21] Mq/release 3.3 (#567)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Update release notes for v3.3.0

Signed-off-by: M Q

* Update the index file

Signed-off-by: M Q

* Bump version: 3.2.0 → 3.3.0

Signed-off-by: M Q

---------

Signed-off-by: M Q
---
 .bumpversion.cfg                    |  2 +-
 docs/source/release_notes/index.md  |  1 +
 docs/source/release_notes/v3.3.0.md | 22 ++++++++++++++++++++++
 requirements-dev.txt                |  7 +++----
 4 files changed, 27 insertions(+), 5 deletions(-)
 create mode 100644 docs/source/release_notes/v3.3.0.md

diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 47311295..49707ec2 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 3.2.0
+current_version = 3.3.0
 parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)((?P<release>a|b|rc)(?P<build>\d+))?
 serialize = {major}.{minor}.{patch}{release}{build}

diff --git a/docs/source/release_notes/index.md b/docs/source/release_notes/index.md
index 450758b0..0923d220 100644
--- a/docs/source/release_notes/index.md
+++ b/docs/source/release_notes/index.md
@@ -10,6 +10,7 @@
 ```{toctree}
 :maxdepth: 1

+v3.3.0
 v3.2.0
 v3.1.0
 v3.0.0
diff --git a/docs/source/release_notes/v3.3.0.md b/docs/source/release_notes/v3.3.0.md
new file mode 100644
index 00000000..13087b38
--- /dev/null
+++ b/docs/source/release_notes/v3.3.0.md
@@ -0,0 +1,22 @@
+# Version 3.3.0 (October 2025)
+
+## What's new in 3.3.0
+
+This version of the App SDK is compatible with the [Holoscan SDK](https://pypi.org/project/holoscan) CUDA 12 version, [holoscan-cu12](https://pypi.org/project/holoscan-cu12).
+
+It has also been tested to be compatible with the latest [MONAI v1.5.1](https://pypi.org/project/monai/1.5.1/), which supports the latest PyTorch v2.8. PyTorch versions 2.7 and above embed a CUDA 12 Runtime `>= 12.6.77`, which is required by the Holoscan v3.6 and v3.7 CUDA 12 versions on x86_64 Linux workstations.
+
+### Key changes
+
+- Compatible with the latest MONAI v1.5.1 and the Holoscan v3.7 CUDA 12 version, [holoscan-cu12](https://pypi.org/project/holoscan-cu12).
+- A volumetric image converted from DICOM now carries consistent `affine` and `space` metadata, e.g., an image converted in its original DICOM orientation is in the `LPS` space.
+
+
+Please also see the closed issues and pull requests on GitHub.
+
+## Additional information
+Please visit the [GETTING STARTED](/getting_started/index) guide and follow the tutorials.
+
+You can learn more about SDK usage through [DEVELOPING WITH SDK](/developing_with_sdk/index).
+
+Please let us know how you like it and what could be improved by [submitting an issue](https://github.com/Project-MONAI/monai-deploy-app-sdk/issues/new/choose) or [asking questions](https://github.com/Project-MONAI/monai-deploy-app-sdk/discussions).
\ No newline at end of file
diff --git a/requirements-dev.txt b/requirements-dev.txt
index a5299c98..8bd97b5b 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -14,10 +14,9 @@ isort
 pytype>=2020.6.1; platform_system != "Windows"
 mypy>=0.790
 psutil
-Sphinx==4.1.2
-recommonmark==0.6.0
-sphinx-autodoc-typehints==1.12.0
-sphinx-rtd-theme==0.5.2
+Sphinx>=5.3.0,<8.0.0
+sphinx-autodoc-typehints>=1.19.0
+sphinx-rtd-theme>=1.0.0
 pytest==7.4.0
 pytest-cov==4.1.0
 pytest-lazy-fixture==0.6.3

From 4c3dd808fdd10d4d8ead89dfd04e7b090598507b Mon Sep 17 00:00:00 2001
From: M Q
Date: Thu, 30 Oct 2025 19:35:28 -0700
Subject: [PATCH 20/21] Use string enum for status

Signed-off-by: M Q
---
 platforms/aidoc/README.md          |  4 ++--
 platforms/aidoc/restful_app/app.py | 16 ++++++++++++----
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/platforms/aidoc/README.md b/platforms/aidoc/README.md
index ae11020d..b95b5627 100644
--- a/platforms/aidoc/README.md
+++ b/platforms/aidoc/README.md
@@ -72,7 +72,7 @@ sequenceDiagram
         REST Service-->>-Client: HTTP 409 Conflict

         Client->>+REST Service: GET /status
-        REST Service-->>-Client: HTTP 200 OK ("status": "BUSY")
+        REST Service-->>-Client: HTTP 200 OK ("status": "PROCESSING")
     end

     AppThread->>+AISpleenSegApp: Create instance(status_callback)
@@ -174,7 +174,7 @@ The script can be run multiple times or modified to loop with different output f
 - **Description**: Checks the current status of the processor.
 - **Success Response**:
   - **Code**: 200 OK
-  - **Content**: `{ "status": "IDLE" }` or `{ "status": "BUSY" }`
+  - **Content**: `{ "status": "IDLE" }` or `{ "status": "PROCESSING" }`

 ### Process Data
diff --git a/platforms/aidoc/restful_app/app.py b/platforms/aidoc/restful_app/app.py
index d17e7bbc..ded0fa91 100644
--- a/platforms/aidoc/restful_app/app.py
+++ b/platforms/aidoc/restful_app/app.py
@@ -16,12 +16,20 @@
 import os
 import sys
 import threading
+from enum import Enum
 from http import HTTPStatus

 import requests
 from flask import Flask, jsonify, request
 from flask_wtf.csrf import CSRFProtect

+# Processing status enum per Aidoc API specification.
+class ProcessingStatus(str, Enum):
+    INITIALIZING = "INITIALIZING"
+    IDLE = "IDLE"
+    PROCESSING = "PROCESSING"
+    ERROR = "ERROR"
+
 # The MONAI Deploy application to be wrapped.
 # This can be changed to any other application in the repository.
 # Provide the module path and the class name of the application.
@@ -40,7 +48,7 @@
 logging.basicConfig(stream=sys.stdout, level=logging.INFO)

 # Global state to track processing status. A lock is used for thread safety.
-PROCESSING_STATUS = "IDLE"
+PROCESSING_STATUS = ProcessingStatus.IDLE
 PROCESSING_LOCK = threading.Lock()

@@ -87,7 +95,7 @@ def app_status_callback(summary: str):
     try:
         logging.info("Starting processing in a background thread.")
-        set_processing_status("BUSY")
+        set_processing_status(ProcessingStatus.PROCESSING)

         # Set environment variables for the MONAI Deploy application.
         # The application context will pick these up.
@@ -118,7 +126,7 @@ def app_status_callback(summary: str):
             app_status_callback(json.dumps(callback_msg))
     finally:
-        set_processing_status("IDLE")
+        set_processing_status(ProcessingStatus.IDLE)
         logging.info("Processor is now IDLE.")

@@ -132,7 +140,7 @@ def status():
 @csrf.exempt
 def process():
     """Endpoint to start a new processing job."""
-    if get_processing_status() == "BUSY":
+    if get_processing_status() == ProcessingStatus.PROCESSING:
         return jsonify({"error": "Processor is busy."}), HTTPStatus.CONFLICT

     data = request.get_json()

From 16c8b6773c5502aba5517ee62574b44ac26fff36 Mon Sep 17 00:00:00 2001
From: M Q
Date: Thu, 30 Oct 2025 19:46:14 -0700
Subject: [PATCH 21/21] Fix the lovely formatting complaint

Signed-off-by: M Q
---
 platforms/aidoc/restful_app/app.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/platforms/aidoc/restful_app/app.py b/platforms/aidoc/restful_app/app.py
index ded0fa91..9ef186f3 100644
--- a/platforms/aidoc/restful_app/app.py
+++ b/platforms/aidoc/restful_app/app.py
@@ -23,6 +23,7 @@
 from flask import Flask, jsonify, request
 from flask_wtf.csrf import CSRFProtect

+
 # Processing status enum per Aidoc API specification.
 class ProcessingStatus(str, Enum):
     INITIALIZING = "INITIALIZING"
@@ -30,6 +31,7 @@ class ProcessingStatus(str, Enum):
     PROCESSING = "PROCESSING"
     ERROR = "ERROR"

+
 # The MONAI Deploy application to be wrapped.
 # This can be changed to any other application in the repository.
 # Provide the module path and the class name of the application.
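
A note on the status refactor in the last two patches: subclassing both `str` and `Enum` is what keeps the wrapper backward compatible, because each member is also a plain string. The standalone sketch below (not part of the patches themselves; the class body mirrors the `ProcessingStatus` enum added to `restful_app/app.py`) illustrates that behavior:

```python
import json
from enum import Enum


class ProcessingStatus(str, Enum):
    # Same members as the enum added to restful_app/app.py in patch 20.
    INITIALIZING = "INITIALIZING"
    IDLE = "IDLE"
    PROCESSING = "PROCESSING"
    ERROR = "ERROR"


# The str mixin makes each member an actual str, so comparisons against the
# raw strings used by older callers still hold.
assert ProcessingStatus.PROCESSING == "PROCESSING"
assert ProcessingStatus.IDLE in ("IDLE", "PROCESSING")

# json.dumps encodes a str subclass by its underlying character data, so the
# wire format of responses like {"status": "IDLE"} is unchanged.
print(json.dumps({"status": ProcessingStatus.PROCESSING}))
# -> {"status": "PROCESSING"}
```

Flask's `jsonify`, used by the `/status` endpoint, delegates to the same JSON encoder, so it should likewise emit the bare string value.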
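
The v3.3.0 release note about consistent `affine` and `space` metadata can be sanity-checked on a converted volume. This is a minimal sketch under stated assumptions: it assumes the `Image` domain object exposes a `metadata()` dict accessor and uses the metadata keys observed in the operator logs above (`space`, `affine`, `nifti_affine_transform`); treat the key names as illustrative rather than a stable API.

```python
from monai.deploy.core.domain.image import Image


def log_orientation_metadata(image: Image) -> None:
    """Print the orientation-related metadata carried by a DICOM-converted volume."""
    meta = image.metadata()
    # Per the release note, 'space' should now agree with the affine: e.g. 'LPS'
    # for a volume kept in the original DICOM orientation, or 'RAS' after an
    # Orientationd(axcodes="RAS") transform such as the one in the updated
    # nuance_pin pre-processing.
    print("space:", meta.get("space"))
    print("affine:\n", meta.get("affine"))
    print("nifti affine:\n", meta.get("nifti_affine_transform"))
```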