diff --git a/README.md b/README.md
index 423f724..73fd982 100755
--- a/README.md
+++ b/README.md
@@ -33,18 +33,49 @@ allprojects {
 }
 ```
 
-project build.gradle
-```groovy
+version catalogs
+```
+[versions]
+moko-tensorflow = ""
+
+[libraries]
+moko-tensorflow = { module = "dev.icerock.moko:tensorflow", version.ref = "moko-tensorflow" }
+```
+
+If you use the default KMP plugin, add this to your project build.gradle.kts:
+```kotlin
+kotlin {
+    cocoapods {
+        // other cocoapods configurations here
+        pod("TensorFlowLiteObjC") {
+            moduleName = "TFLTensorFlowLite"
+        }
+        // Or, in a non-exported module:
+        pod(name = "TensorFlowLiteObjC", linkOnly = true, moduleName = "TFLTensorFlowLite")
+    }
+    sourceSets {
+        val commonMain by getting {
+            dependencies {
+                api(libs.moko.tensorflow)
+            }
+        }
+    }
+}
+```
+If you use the default moko gradle plugin, add this to your project build.gradle.kts:
+```kotlin
 dependencies {
-    commonMainApi("dev.icerock.moko:tensorflow:0.3.0")
+    commonMainApi(libs.moko.tensorflow)
 }
-
 cocoaPods {
-    podsProject = file("../ios-app/Pods/Pods.xcodeproj") // here should be path to Pods xcode project
+    // here should be the path to the Pods Xcode project
+    podsProject = file("../ios-app/Pods/Pods.xcodeproj")
 
     pod("TensorFlowLiteObjC", module = "TFLTensorFlowLite", onlyLink = true)
 }
-
+```
+Also add a framework location resolver to your project build.gradle.kts:
+```kotlin
 kotlin.targets
     .filterIsInstance<KotlinNativeTarget>()
     .flatMap { it.binaries }
@@ -65,7 +96,7 @@ kotlin.targets
 
 Podfile
 ```ruby
-pod 'mokoTensorflow', :git => 'https://github.com/icerockdev/moko-tensorflow.git', :tag => 'release/0.3.0'
+pod 'mokoTensorflow', :git => 'https://github.com/icerockdev/moko-tensorflow.git', :tag => 'release/'
 ```
 
 ## Usage
@@ -137,6 +168,11 @@ class ViewController: UIViewController {
 }
 ```
 
+## Pitfalls
+1. Only Float32 is supported out of the box, but you can easily add your own type converters.
+2. When using ObjCInterpreter you are required to convert any input data into NSData.
+3. Because of iOS array allocation, only models with the shape [N,X,Y,...,Z] are supported, where N is the batch size.
+
 ## Samples
 
 Please see more examples in the [sample directory](sample).
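For context on the new usage flow and the pitfalls above, here is a minimal common-code sketch of driving the interface-based API. Only `Interpreter`, `NativeInput`, `allocateTensors` and `run` come from this change set; the `classifyDigit` helper and the 10-class output shape are illustrative assumptions, not part of the PR.

```kotlin
import dev.icerock.moko.tensorflow.Interpreter
import dev.icerock.moko.tensorflow.NativeInput

// Hypothetical helper: NativeInput wraps a ByteBuffer on Android and NSData on iOS,
// so common code never has to touch platform types directly.
fun classifyDigit(interpreter: Interpreter, input: NativeInput): Int {
    val output = Array(1) { FloatArray(10) } // [batch, classes]; matches the [N, ...] shape pitfall
    interpreter.allocateTensors()            // prepare tensors before inference
    interpreter.run(input, output)           // NativeInput overload, preferred over the deprecated run()
    return output[0].indices.maxByOrNull { output[0][it] } ?: -1
}
```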
diff --git a/mokoTensorflow.podspec b/mokoTensorflow.podspec
index 64c77d0..f208db3 100644
--- a/mokoTensorflow.podspec
+++ b/mokoTensorflow.podspec
@@ -11,7 +11,7 @@ Pod::Spec.new do |spec|
     spec.source_files = "tensorflow/src/iosMain/swift/**/*.{h,m,swift}"
     spec.resources = "tensorflow/src/iosMain/bundle/**/*"
 
-    spec.dependency 'TensorFlowLiteObjC', '0.0.1-nightly.20230212'
+    spec.dependency 'TensorFlowLiteObjC', '2.12.0'
 
     spec.ios.deployment_target = '11.0'
     spec.swift_version = '5.0'
diff --git a/sample/android-app/src/main/java/com/icerockdev/MainActivity.kt b/sample/android-app/src/main/java/com/icerockdev/MainActivity.kt
index 6c639ef..9decd09 100644
--- a/sample/android-app/src/main/java/com/icerockdev/MainActivity.kt
+++ b/sample/android-app/src/main/java/com/icerockdev/MainActivity.kt
@@ -15,8 +15,9 @@ import androidx.lifecycle.lifecycleScope
 import com.divyanshu.draw.widget.DrawView
 import com.icerockdev.library.ResHolder
 import com.icerockdev.library.TFDigitClassifier
-import dev.icerock.moko.tensorflow.Interpreter
 import dev.icerock.moko.tensorflow.InterpreterOptions
+import dev.icerock.moko.tensorflow.JVMInterpreter
+import dev.icerock.moko.tensorflow.NativeInput
 import java.nio.ByteBuffer
 import java.nio.ByteOrder
 import java.util.concurrent.atomic.AtomicBoolean
@@ -28,7 +29,7 @@ class MainActivity : AppCompatActivity() {
     private lateinit var clearButton: Button
     private lateinit var predictedTextView: TextView
 
-    private lateinit var interpreter: Interpreter
+    private lateinit var interpreter: JVMInterpreter
     private lateinit var digitClassifier: TFDigitClassifier
 
     private val isInterpreterInited = AtomicBoolean(false)
@@ -60,7 +61,7 @@ class MainActivity : AppCompatActivity() {
             true
         }
 
-        interpreter = Interpreter(ResHolder.getDigitsClassifierModel(), InterpreterOptions(2, useNNAPI = true), this)
+        interpreter = JVMInterpreter(ResHolder.getDigitsClassifierModel(), InterpreterOptions(2, useNNAPI = true), this)
         digitClassifier = TFDigitClassifier(interpreter, this.lifecycleScope)
 
         digitClassifier.initialize()
@@ -82,8 +83,13 @@ class MainActivity : AppCompatActivity() {
             digitClassifier.inputImageHeight,
             true
         )
-
-        digitClassifier.classifyAsync(convertBitmapToByteBuffer(bitmapToClassify)) {
+        val byteBuffer = convertBitmapToByteBuffer(bitmapToClassify)
+//        digitClassifier.classifyAsync(byteBuffer) {
+//            runOnUiThread {
+//                predictedTextView.text = it
+//            }
+//        }
+        digitClassifier.classifyNativeAsync(NativeInput(byteBuffer)) {
             runOnUiThread {
                 predictedTextView.text = it
             }
diff --git a/sample/ios-app/Podfile.lock b/sample/ios-app/Podfile.lock
index 6953208..8bae775 100644
--- a/sample/ios-app/Podfile.lock
+++ b/sample/ios-app/Podfile.lock
@@ -1,15 +1,15 @@
 PODS:
-  - mokoTensorflow (0.4.0):
-    - TensorFlowLiteObjC (= 0.0.1-nightly.20230212)
+  - mokoTensorflow (0.3.0):
+    - TensorFlowLiteObjC (= 2.12.0)
   - MultiPlatformLibrary (0.1.0)
   - Sketch (3.0)
-  - TensorFlowLiteC (0.0.1-nightly.20230212):
-    - TensorFlowLiteC/Core (= 0.0.1-nightly.20230212)
-  - TensorFlowLiteC/Core (0.0.1-nightly.20230212)
-  - TensorFlowLiteObjC (0.0.1-nightly.20230212):
-    - TensorFlowLiteObjC/Core (= 0.0.1-nightly.20230212)
-  - TensorFlowLiteObjC/Core (0.0.1-nightly.20230212):
-    - TensorFlowLiteC (= 0.0.1-nightly.20230212)
+  - TensorFlowLiteC (2.12.0):
+    - TensorFlowLiteC/Core (= 2.12.0)
+  - TensorFlowLiteC/Core (2.12.0)
+  - TensorFlowLiteObjC (2.12.0):
+    - TensorFlowLiteObjC/Core (= 2.12.0)
+  - TensorFlowLiteObjC/Core (2.12.0):
+    - TensorFlowLiteC (= 2.12.0)
 
 DEPENDENCIES:
   - mokoTensorflow (from `../..`)
@@ -29,11 +29,11 @@ EXTERNAL SOURCES:
     :path: "../mpp-library"
 
 SPEC CHECKSUMS:
-  mokoTensorflow: f31dd35d9c68c098aa842b2885ded86e24b91a83
+  mokoTensorflow: 3b3781b48d0b8822a9a5811f6555be26156f7b4b
   MultiPlatformLibrary: 91d3837ea2c0943e0713f98671a36913470ef412
   Sketch: 49a4b71f7bc77316ed5f75ee79dedaa2b844d5e7
-  TensorFlowLiteC: 131cd06718a81ace70d56f10b1404157ce40d7fc
-  TensorFlowLiteObjC: 2e5cf40e720254b0905e09a7f638e7a2cf935727
+  TensorFlowLiteC: 20785a69299185a379ba9852b6625f00afd7984a
+  TensorFlowLiteObjC: 9a46a29a76661c513172cfffd3bf712b11ef25c3
 
 PODFILE CHECKSUM: 5d66f0fb585809b01037efa8b5383d224d5dca98
diff --git a/sample/ios-app/tensorflow-test/ViewController.swift b/sample/ios-app/tensorflow-test/ViewController.swift
index ea91b7f..95cf24d 100644
--- a/sample/ios-app/tensorflow-test/ViewController.swift
+++ b/sample/ios-app/tensorflow-test/ViewController.swift
@@ -29,7 +29,7 @@ class ViewController: UIViewController, SketchViewDelegate {
         )
         let modelFileRes: ResourcesFileResource = ResHolder().getDigitsClassifierModel()
 
-        interpreter = Interpreter(
+        interpreter = ObjCInterpreter(
             fileResource: modelFileRes,
             options: options
         )
diff --git a/sample/mpp-library/src/commonMain/kotlin/com/icerockdev/library/TFDigitClassifier.kt b/sample/mpp-library/src/commonMain/kotlin/com/icerockdev/library/TFDigitClassifier.kt
index 975aae0..de5b9ab 100644
--- a/sample/mpp-library/src/commonMain/kotlin/com/icerockdev/library/TFDigitClassifier.kt
+++ b/sample/mpp-library/src/commonMain/kotlin/com/icerockdev/library/TFDigitClassifier.kt
@@ -5,6 +5,7 @@
 package com.icerockdev.library
 
 import dev.icerock.moko.tensorflow.Interpreter
+import dev.icerock.moko.tensorflow.NativeInput
 import kotlinx.coroutines.CoroutineScope
 import kotlinx.coroutines.Dispatchers
 import kotlinx.coroutines.launch
@@ -26,12 +27,24 @@
         inputImageWidth = inputShape[1]
         inputImageHeight = inputShape[2]
         modelInputSize = FLOAT_TYPE_SIZE * inputImageWidth * inputImageHeight * PIXEL_SIZE
+        interpreter.allocateTensors()
     }
 
     fun classifyAsync(inputData: Any, onResult: (String) -> Unit) {
         scope.launch(Dispatchers.Default) {
             val result = Array(1) { FloatArray(OUTPUT_CLASSES_COUNT) }
-            interpreter.run(listOf(inputData), mapOf(Pair(0, result)))
+            interpreter.run(arrayOf(inputData), mapOf(Pair(0, result)))
+
+            val maxIndex = result[0].indices.maxByOrNull { result[0][it] } ?: -1
+            val strResult = "Prediction Result: $maxIndex\nConfidence: ${result[0][maxIndex]}"
+
+            onResult(strResult)
+        }
+    }
+    fun classifyNativeAsync(nativeInput: NativeInput, onResult: (String) -> Unit) {
+        scope.launch(Dispatchers.Default) {
+            val result = Array(1) { FloatArray(OUTPUT_CLASSES_COUNT) }
+            interpreter.run(nativeInput, result)
 
             val maxIndex = result[0].indices.maxByOrNull { result[0][it] } ?: -1
             val strResult = "Prediction Result: $maxIndex\nConfidence: ${result[0][maxIndex]}"
diff --git a/tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt b/tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/JVMInterpreter.kt
similarity index 54%
rename from tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt
rename to tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/JVMInterpreter.kt
index c55bff3..4656456 100644
--- a/tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt
+++ b/tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/JVMInterpreter.kt
@@ -7,11 +7,11 @@ package dev.icerock.moko.tensorflow
 import android.content.Context
 import dev.icerock.moko.resources.FileResource
 
-actual class Interpreter(
-    actual val fileResource: FileResource,
-    actual val options: InterpreterOptions,
+class JVMInterpreter(
+    override val fileResource: FileResource,
+    override val options: InterpreterOptions,
     context: Context
-) {
+) : Interpreter {
 
     private val tensorFlowInterpreter = PlatformInterpreter(
         fileResource.openAsFile(context),
@@ -21,12 +21,12 @@
     /**
     * Gets the number of input tensors.
     */
-    actual fun getInputTensorCount(): Int = tensorFlowInterpreter.inputTensorCount
+    override fun getInputTensorCount(): Int = tensorFlowInterpreter.inputTensorCount
 
     /**
     * Gets the number of output Tensors.
     */
-    actual fun getOutputTensorCount(): Int = tensorFlowInterpreter.outputTensorCount
+    override fun getOutputTensorCount(): Int = tensorFlowInterpreter.outputTensorCount
 
     /**
     * Gets the Tensor associated with the provdied input index.
@@ -34,7 +34,9 @@
     * @throws IllegalArgumentException if [index] is negative or is not smaller than the
     * number of model inputs.
     */
-    actual fun getInputTensor(index: Int): Tensor = tensorFlowInterpreter.getInputTensor(index).toTensor()
+    override fun getInputTensor(index: Int): Tensor {
+        return tensorFlowInterpreter.getInputTensor(index).toTensor()
+    }
 
     /**
     * Gets the Tensor associated with the provdied output index.
@@ -42,26 +44,38 @@
     * @throws IllegalArgumentException if [index] is negative or is not smaller than the
     * number of model inputs.
     */
-    actual fun getOutputTensor(index: Int): Tensor = tensorFlowInterpreter.getOutputTensor(index).toTensor()
+    override fun getOutputTensor(index: Int): Tensor {
+        return tensorFlowInterpreter.getOutputTensor(index).toTensor()
+    }
 
     /**
     * Resizes [index] input of the native model to the given [shape].
    */
-    actual fun resizeInput(index: Int, shape: TensorShape) {
+    override fun resizeInput(index: Int, shape: TensorShape) {
         tensorFlowInterpreter.resizeInput(index, shape.dimensions)
     }
 
+    override fun allocateTensors() {
+        tensorFlowInterpreter.allocateTensors()
+    }
+
     /**
     * Runs model inference if the model takes multiple inputs, or returns multiple outputs.
    */
-    actual fun run(inputs: List<Any>, outputs: Map<Int, Any>) {
-        tensorFlowInterpreter.runForMultipleInputsOutputs(inputs.toTypedArray(), outputs)
+    override fun run(inputs: Array<*>, outputs: Map<Int, Any>) {
+        tensorFlowInterpreter.runForMultipleInputsOutputs(inputs, outputs)
+    }
+
+    override fun run(nativeInput: NativeInput, output: Array<*>) {
+        val inputs = arrayOf(nativeInput.byteBuffer)
+        val outputs = mapOf(Interpreter.OUTPUT_KEY to output)
+        run(inputs, outputs)
     }
 
     /**
-    * Release resources associated with the [Interpreter].
+    * Release resources associated with the [JVMInterpreter].
    */
-    actual fun close() {
+    override fun close() {
         tensorFlowInterpreter.close()
     }
 }
diff --git a/tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt b/tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt
new file mode 100644
index 0000000..90e369a
--- /dev/null
+++ b/tensorflow/src/androidMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt
@@ -0,0 +1,5 @@
+package dev.icerock.moko.tensorflow
+
+import java.nio.ByteBuffer
+
+actual class NativeInput(val byteBuffer: ByteBuffer)
diff --git a/tensorflow/src/commonMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt b/tensorflow/src/commonMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt
index 29f3f29..ba71062 100644
--- a/tensorflow/src/commonMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt
+++ b/tensorflow/src/commonMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt
@@ -6,8 +6,7 @@ package dev.icerock.moko.tensorflow
 
 import dev.icerock.moko.resources.FileResource
 
-expect class Interpreter {
-
+interface Interpreter {
     val fileResource: FileResource
 
     val options: InterpreterOptions
@@ -41,14 +40,35 @@ expect class Interpreter {
     * Resizes [index] input of the native model to the given [shape].
     */
    fun resizeInput(index: Int, shape: TensorShape)
+    fun allocateTensors()
 
     /**
     * Runs model inference if the model takes multiple inputs, or returns multiple outputs.
+     *
+     * On iOS, [outputs] is required to have the { 0: Array } structure.
+     *
+     * On iOS, [inputs] is required to be an Array of NSData.
+     */
+    @Deprecated("This approach may behave differently on the iOS and Android platforms. Use run with NativeInput instead.")
+    fun run(inputs: Array<*>, outputs: Map<Int, Any>)
+
+    /**
+     * Runs model inference with native input data.
+     *
+     * @param nativeInput - NSData on iOS or java.nio.ByteBuffer on Android
+     * @param output - the output array to fill
     */
-    fun run(inputs: List<Any>, outputs: Map<Int, Any>)
+    fun run(nativeInput: NativeInput, output: Array<*>)
 
     /**
     * Release resources associated with the [Interpreter].
     */
    fun close()
+
+    companion object {
+        /**
+         * Static output key which should be used when adding output data in [Interpreter.run].
+         */
+        const val OUTPUT_KEY = 0
+    }
 }
diff --git a/tensorflow/src/commonMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt b/tensorflow/src/commonMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt
new file mode 100644
index 0000000..c6b9d6d
--- /dev/null
+++ b/tensorflow/src/commonMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt
@@ -0,0 +1,3 @@
+package dev.icerock.moko.tensorflow
+
+expect class NativeInput
diff --git a/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt
deleted file mode 100644
index acd7194..0000000
--- a/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/Interpreter.kt
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2020 IceRock MAG Inc. Use of this source code is governed by the Apache 2.0 license.
- */
-
-package dev.icerock.moko.tensorflow
-
-import dev.icerock.moko.resources.FileResource
-import platform.Foundation.NSData
-
-@Suppress("ForbiddenComment")
-actual class Interpreter(
-    actual val fileResource: FileResource,
-    actual val options: InterpreterOptions
-) {
-
-    private val tflInterpreter: PlatformInterpreter
-
-    init {
-        tflInterpreter = errorHandled { errPtr ->
-            PlatformInterpreter(fileResource.path, options.tflInterpreterOptions, errPtr)
-        }!!
-
-        errorHandled { errPtr ->
-            tflInterpreter.allocateTensorsWithError(errPtr)
-        }
-    }
-
-    /**
-     * Gets the number of input tensors.
-     */
-    actual fun getInputTensorCount(): Int {
-        return tflInterpreter.inputTensorCount().toInt()
-    }
-
-    /**
-     * Gets the number of output Tensors.
-     */
-    actual fun getOutputTensorCount(): Int {
-        return tflInterpreter.outputTensorCount().toInt()
-    }
-
-    /**
-     * Gets the Tensor associated with the provdied input index.
-     *
-     * @throws IllegalArgumentException if [index] is negative or is not smaller than the
-     * number of model inputs.
-     */
-    actual fun getInputTensor(index: Int): Tensor {
-        return errorHandled { errPtr ->
-            tflInterpreter.inputTensorAtIndex(index.toULong(), errPtr)
-        }!!.toTensor()
-    }
-
-    /**
-     * Gets the Tensor associated with the provdied output index.
-     *
-     * @throws IllegalArgumentException if [index] is negative or is not smaller than the
-     * number of model inputs.
-     */
-    actual fun getOutputTensor(index: Int): Tensor {
-        return errorHandled { errPtr ->
-            tflInterpreter.outputTensorAtIndex(index.toULong(), errPtr)
-        }!!.toTensor()
-    }
-
-    /**
-     * Resizes [index] input of the native model to the given [shape].
-     */
-    actual fun resizeInput(index: Int, shape: TensorShape) {
-        errorHandled { errPtr ->
-            tflInterpreter.resizeInputTensorAtIndex(
-                index.toULong(),
-                shape.getNSNumberDimensionList(),
-                errPtr
-            )
-        }
-    }
-
-    /**
-     * Runs model inference if the model takes multiple inputs, or returns multiple outputs.
-     *
-     * TODO: need to implement [outputs] applying.
-     */
-    actual fun run(
-        inputs: List<Any>,
-        outputs: Map<Int, Any>
-    ) {
-        require(inputs.size > getInputTensorCount()) { "Wrong inputs dimension." }
-
-        inputs.forEachIndexed { index, any ->
-            val inputTensor = getInputTensor(index)
-            errorHandled { errPtr ->
-                inputTensor.platformTensor.copyData(
-                    any as NSData,
-                    errPtr
-                ) // Fixme: hardcast Any to NSData
-            }
-        }
-
-        errorHandled { errPtr ->
-            tflInterpreter.invokeWithError(errPtr)
-        }
-
-        inputs.forEachIndexed { index, any ->
-            val outputTensor = getOutputTensor(index)
-
-            val array = when (outputTensor.dataType) {
-                TensorDataType.FLOAT32 -> {
-                    errorHandled { errPtr ->
-                        outputTensor.platformTensor.dataWithError(errPtr)
-                    }!!.toUByteArray().toFloatArray()
-                }
-                TensorDataType.INT32 -> IntArray(outputTensor.dataType.byteSize()) // Fixme:
-                TensorDataType.UINT8 -> UIntArray(outputTensor.dataType.byteSize()) // Fixme:
-                TensorDataType.INT64 -> LongArray(outputTensor.dataType.byteSize()) // Fixme:
-                TensorDataType.INT16 -> ShortArray(outputTensor.dataType.byteSize()) // TODO()
-                TensorDataType.INT8 -> ByteArray(outputTensor.dataType.byteSize()) // TODO()
-            }
-
-            (outputs[0] as Array<Any>)[0] =
-                array // TODO: hardcoded case, works only with digits sample
-        }
-    }
-
-    /**
-     * Release resources associated with the [Interpreter].
-     */
-    actual fun close() {
-        // TODO: ???
-    }
-}
diff --git a/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt
new file mode 100644
index 0000000..b3c2d05
--- /dev/null
+++ b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/NativeInput.kt
@@ -0,0 +1,5 @@
+package dev.icerock.moko.tensorflow
+
+import platform.Foundation.NSData
+
+actual class NativeInput(val nsData: NSData)
diff --git a/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/ObjCInterpreter.kt b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/ObjCInterpreter.kt
new file mode 100644
index 0000000..0bd463c
--- /dev/null
+++ b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/ObjCInterpreter.kt
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2020 IceRock MAG Inc. Use of this source code is governed by the Apache 2.0 license.
+ */
+
+package dev.icerock.moko.tensorflow
+
+import dev.icerock.moko.resources.FileResource
+import dev.icerock.moko.tensorflow.map.Float32Mapper
+import platform.Foundation.NSData
+
+@Suppress("ForbiddenComment")
+class ObjCInterpreter(
+    override val fileResource: FileResource,
+    override val options: InterpreterOptions
+) : Interpreter {
+
+    private val tflInterpreter: PlatformInterpreter = errorHandled { errPtr ->
+        PlatformInterpreter(fileResource.path, options.tflInterpreterOptions, errPtr)
+    }!!
+
+    /**
+     * Gets the number of input tensors.
+     */
+    override fun getInputTensorCount(): Int {
+        return tflInterpreter.inputTensorCount().toInt()
+    }
+
+    /**
+     * Gets the number of output Tensors.
+     */
+    override fun getOutputTensorCount(): Int {
+        return tflInterpreter.outputTensorCount().toInt()
+    }
+
+    /**
+     * Gets the Tensor associated with the provided input index.
+     *
+     * @throws IllegalArgumentException if [index] is negative or is not smaller than the
+     * number of model inputs.
+     */
+    override fun getInputTensor(index: Int): Tensor {
+        return errorHandled { errPtr ->
+            tflInterpreter.inputTensorAtIndex(index.toULong(), errPtr)
+        }!!.toTensor()
+    }
+
+    /**
+     * Gets the Tensor associated with the provided output index.
+     *
+     * @throws IllegalArgumentException if [index] is negative or is not smaller than the
+     * number of model inputs.
+     */
+    override fun getOutputTensor(index: Int): Tensor {
+        return errorHandled { errPtr ->
+            tflInterpreter.outputTensorAtIndex(index.toULong(), errPtr)
+        }!!.toTensor()
+    }
+
+    /**
+     * Resizes [index] input of the native model to the given [shape].
+     */
+    override fun resizeInput(index: Int, shape: TensorShape) {
+        errorHandled { errPtr ->
+            tflInterpreter.resizeInputTensorAtIndex(
+                index.toULong(),
+                shape.getNSNumberDimensionList(),
+                errPtr
+            )
+        }
+    }
+
+    override fun allocateTensors() {
+        errorHandled { errPtr ->
+            tflInterpreter.allocateTensorsWithError(errPtr)
+        }
+    }
+
+    /**
+     * Runs model inference if the model takes multiple inputs, or returns multiple outputs.
+     *
+     * TODO: need to implement [outputs] applying.
+     */
+    override fun run(
+        inputs: Array<*>,
+        outputs: Map<Int, Any>
+    ) {
+        require(outputs.size == 1) {
+            "Output map should have the { 0: Array } structure"
+        }
+        require(outputs.containsKey(Interpreter.OUTPUT_KEY)) {
+            "Output map should have the { 0: Array } structure"
+        }
+        require(outputs[Interpreter.OUTPUT_KEY] is Array<*>) {
+            "Output map 0's key value is not Array<*>. Output map should have the { 0: Array } structure"
+        }
+        require(inputs.size <= getInputTensorCount()) {
+            "Wrong inputs dimension."
+        }
+        inputs.forEach { input ->
+            require(input is NSData) {
+                "iOS interpreter only accepts NSData as an input"
+            }
+        }
+        val nsInputs = inputs.map { it as NSData }
+        val outputArray = outputs[Interpreter.OUTPUT_KEY] as Array<Any>
+        // Fill the interpreter's input tensors with the NSData inputs
+        nsInputs.forEachIndexed { index, nsData ->
+            val inputTensor = getInputTensor(index)
+            errorHandled { errPtr ->
+                inputTensor.platformTensor.copyData(
+                    nsData,
+                    errPtr
+                )
+            }
+        }
+        errorHandled { errPtr ->
+            tflInterpreter.invokeWithError(errPtr)
+        }
+        // Here we can read the output data back
+        nsInputs.indices.forEach { index ->
+            val outputTensor = getOutputTensor(index)
+
+            val array = when (outputTensor.dataType) {
+                TensorDataType.FLOAT32 -> Float32Mapper.map(outputTensor.platformTensor)
+
+                else -> error("ObjCInterpreter doesn't have a converter for ${outputTensor.dataType} yet")
+            }
+            // No longer hardcoded to the digits sample: this works with every shape
+            // that follows the [N,X,Y,...,Z] contract, where N is the batch size.
+            outputArray[index] = array
+        }
+    }
+
+    override fun run(nativeInput: NativeInput, output: Array<*>) {
+        val inputs = arrayOf(nativeInput.nsData) as Array<*>
+        val outputs = mapOf(Interpreter.OUTPUT_KEY to output)
+        run(inputs, outputs)
+    }
+
+    /**
+     * Release resources associated with the [ObjCInterpreter].
+     *
+     * The Objective-C interpreter doesn't have a JVM-like close method.
+     */
+    override fun close() = Unit
+}
diff --git a/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/Utils.kt b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/Utils.kt
index 37dcd0d..eca2fa9 100644
--- a/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/Utils.kt
+++ b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/Utils.kt
@@ -20,25 +20,15 @@ import platform.posix.memcpy
 internal fun <T> errorHandled(block: (CPointer<ObjCObjectVar<NSError?>>) -> T?): T? {
     val (result, error) = memScoped {
         val errorPtr = alloc<ObjCObjectVar<NSError?>>()
-        runCatching {
-            block(errorPtr.ptr)
-        }.getOrNull() to errorPtr.value
+        // need to print the stack trace somehow
+        runCatching { block(errorPtr.ptr) }
+            .onFailure { it.printStackTrace() }
+            .getOrNull() to errorPtr.value
     }
     if (error != null) throw Exception(error.description)
     return result
 }
 
-internal fun UByteArray.toFloatArray(): FloatArray {
-    @Suppress("MagicNumber")
-    val floatArr = FloatArray(this.size / 4)
-    usePinned { src ->
-        floatArr.usePinned { dst ->
-            memcpy(dst.addressOf(0), src.addressOf(0), this.size.toULong())
-        }
-    }
-    return floatArr
-}
-
 internal fun NSData.toUByteArray(): UByteArray = UByteArray(this.length.toInt()).apply {
     usePinned {
         memcpy(it.addressOf(0), this@toUByteArray.bytes, this@toUByteArray.length)
diff --git a/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/map/Float32Mapper.kt b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/map/Float32Mapper.kt
new file mode 100644
index 0000000..37b2fdf
--- /dev/null
+++ b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/map/Float32Mapper.kt
@@ -0,0 +1,29 @@
+package dev.icerock.moko.tensorflow.map
+
+import dev.icerock.moko.tensorflow.PlatformTensor
+import dev.icerock.moko.tensorflow.TensorDataType
+import dev.icerock.moko.tensorflow.errorHandled
+import dev.icerock.moko.tensorflow.toUByteArray
+import kotlinx.cinterop.addressOf
+import kotlinx.cinterop.usePinned
+import platform.posix.memcpy
+
+object Float32Mapper : TensorDataTypeMapper<FloatArray> {
+    override val type: TensorDataType = TensorDataType.FLOAT32
+    private fun UByteArray.toFloatArray(): FloatArray {
+        @Suppress("MagicNumber")
+        val floatArr = FloatArray(this.size / 4)
+        usePinned { src ->
+            floatArr.usePinned { dst ->
+                memcpy(dst.addressOf(0), src.addressOf(0), this.size.toULong())
+            }
+        }
+        return floatArr
+    }
+
+    override fun map(tensor: PlatformTensor): FloatArray {
+        return errorHandled { errPtr ->
+            tensor.dataWithError(errPtr)
+        }!!.toUByteArray().toFloatArray()
+    }
+}
diff --git a/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/map/TensorDataTypeMapper.kt b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/map/TensorDataTypeMapper.kt
new file mode 100644
index 0000000..0969904
--- /dev/null
+++ b/tensorflow/src/iosMain/kotlin/dev/icerock/moko/tensorflow/map/TensorDataTypeMapper.kt
@@ -0,0 +1,12 @@
+package dev.icerock.moko.tensorflow.map
+
+import dev.icerock.moko.tensorflow.PlatformTensor
+import dev.icerock.moko.tensorflow.TensorDataType
+
+/**
+ * [TensorDataTypeMapper] makes it (almost) easy to add converters for the Objective-C interpreter.
+ */
+interface TensorDataTypeMapper<T> {
+    val type: TensorDataType
+    fun map(tensor: PlatformTensor): T
+}
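To illustrate the Pitfalls note that extra type converters can be added, a hypothetical Int32Mapper could follow the same pattern as Float32Mapper. This is a sketch, not part of the PR: the `toIntArray` helper is an assumption, and the new mapper would still need to be wired into the `when` branch of `ObjCInterpreter.run`.

```kotlin
package dev.icerock.moko.tensorflow.map

import dev.icerock.moko.tensorflow.PlatformTensor
import dev.icerock.moko.tensorflow.TensorDataType
import dev.icerock.moko.tensorflow.errorHandled
import dev.icerock.moko.tensorflow.toUByteArray
import kotlinx.cinterop.addressOf
import kotlinx.cinterop.usePinned
import platform.posix.memcpy

// Hypothetical converter for INT32 output tensors, mirroring Float32Mapper.
object Int32Mapper : TensorDataTypeMapper<IntArray> {
    override val type: TensorDataType = TensorDataType.INT32

    // Reinterpret the raw tensor bytes as 32-bit integers (4 bytes per value).
    private fun UByteArray.toIntArray(): IntArray {
        val intArr = IntArray(this.size / 4)
        usePinned { src ->
            intArr.usePinned { dst ->
                memcpy(dst.addressOf(0), src.addressOf(0), this.size.toULong())
            }
        }
        return intArr
    }

    override fun map(tensor: PlatformTensor): IntArray {
        return errorHandled { errPtr ->
            tensor.dataWithError(errPtr)
        }!!.toUByteArray().toIntArray()
    }
}
```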