[tf.lite] Re-land "Fix JNI memory leak when constructor exceptions are thrown"

Properly dispose of native memory handles if an exception is thrown in
the Interpreter constructor. This change landed previously but was rolled
back due to a bug in how the ByteBuffer constructor argument was stored;
that bug has been fixed in this re-land.
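
In outline, the guard this change re-lands follows this idiom (a condensed
sketch of the pattern, not the literal code; see the init() changes in the
diff below):

    // Handles start at zero so a failed native allocation is never deleted.
    errorHandle = 0;
    modelHandle = 0;
    interpreterHandle = 0;
    try {
      errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
      modelHandle = modelCreator.create(errorHandle);
      interpreterHandle = createInterpreter(modelHandle, errorHandle, options.numThreads);
      // ... remaining interpreter setup ...
      isMemoryAllocated = true;
    } finally {
      // On any failure above, release whichever handles were created.
      if (!isMemoryAllocated && (errorHandle != 0 || modelHandle != 0 || interpreterHandle != 0)) {
        delete(errorHandle, modelHandle, interpreterHandle);
      }
    }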

PiperOrigin-RevId: 352525809
Change-Id: I4f572aebe7a8afa56e8007f2c08622a3b5390cbb
diff --git a/tensorflow/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java b/tensorflow/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
index a23007b..cf690df 100644
--- a/tensorflow/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
+++ b/tensorflow/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
@@ -44,14 +44,19 @@
     this(byteBuffer, /* options= */ null);
   }
 
-  NativeInterpreterWrapper(String modelPath, Interpreter.Options options) {
+  NativeInterpreterWrapper(final String modelPath, Interpreter.Options options) {
     TensorFlowLite.init();
-    long errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
-    long modelHandle = createModel(modelPath, errorHandle);
-    init(errorHandle, modelHandle, options);
+    ModelCreator modelCreator =
+        new ModelCreator() {
+          @Override
+          public long create(long errorHandle) {
+            return createModel(modelPath, errorHandle);
+          }
+        };
+    init(modelCreator, options);
   }
 
-  NativeInterpreterWrapper(ByteBuffer buffer, Interpreter.Options options) {
+  NativeInterpreterWrapper(final ByteBuffer buffer, Interpreter.Options options) {
     TensorFlowLite.init();
     if (buffer == null
         || (!(buffer instanceof MappedByteBuffer)
@@ -61,45 +66,67 @@
               + "ByteBuffer using ByteOrder.nativeOrder() which contains bytes of model content.");
     }
     this.modelByteBuffer = buffer;
-    long errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
-    long modelHandle = createModelWithBuffer(modelByteBuffer, errorHandle);
-    init(errorHandle, modelHandle, options);
+    ModelCreator modelCreator =
+        new ModelCreator() {
+          @Override
+          public long create(long errorHandle) {
+            return createModelWithBuffer(buffer, errorHandle);
+          }
+        };
+    init(modelCreator, options);
   }
 
-  private void init(long errorHandle, long modelHandle, Interpreter.Options options) {
+  private interface ModelCreator {
+    public long create(long errorHandle);
+  }
+
+  private void init(ModelCreator modelCreator, Interpreter.Options options) {
     if (options == null) {
       options = new Interpreter.Options();
     }
-    this.errorHandle = errorHandle;
-    this.modelHandle = modelHandle;
-    this.interpreterHandle = createInterpreter(modelHandle, errorHandle, options.numThreads);
-    if (options.allowCancellation != null && options.allowCancellation) {
-      this.cancellationFlagHandle = createCancellationFlag(interpreterHandle);
+    // First initialize native handles to zero. If an exception is encountered, we will dispose of
+    // them, and this avoids deleting an uninitialized pointer if handle creation fails.
+    errorHandle = 0;
+    modelHandle = 0;
+    interpreterHandle = 0;
+    try {
+      errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
+      modelHandle = modelCreator.create(errorHandle);
+      interpreterHandle = createInterpreter(modelHandle, errorHandle, options.numThreads);
+      if (options.allowCancellation != null && options.allowCancellation) {
+        cancellationFlagHandle = createCancellationFlag(interpreterHandle);
+      }
+      inputTensors = new Tensor[getInputCount(interpreterHandle)];
+      outputTensors = new Tensor[getOutputCount(interpreterHandle)];
+      if (options.allowFp16PrecisionForFp32 != null) {
+        allowFp16PrecisionForFp32(
+            interpreterHandle, options.allowFp16PrecisionForFp32.booleanValue());
+      }
+      if (options.allowBufferHandleOutput != null) {
+        allowBufferHandleOutput(interpreterHandle, options.allowBufferHandleOutput.booleanValue());
+      }
+      applyDelegates(options);
+
+      // Simply use "-1" to represent the default mode.
+      int applyXNNPACKMode = -1;
+      if (options.useXNNPACK != null) {
+        applyXNNPACKMode = options.useXNNPACK.booleanValue() ? 1 : 0;
+      }
+
+      // TODO(b/171856982): uncomment the following when applying XNNPACK delegate by default is
+      // enabled for C++ TfLite library on Android platform.
+      if (applyXNNPACKMode == 1 /*|| applyXNNPACKMode == -1*/) {
+        useXNNPACK(interpreterHandle, errorHandle, applyXNNPACKMode, options.numThreads);
+      }
+      allocateTensors(interpreterHandle, errorHandle);
+      isMemoryAllocated = true;
+    } finally {
+      // If any of the native handles were created successfully, we should dispose of them if
+      // creation and allocation did not succeed. This avoids leaks in the event of an error.
+      if (!isMemoryAllocated && (errorHandle != 0 || modelHandle != 0 || interpreterHandle != 0)) {
+        delete(errorHandle, modelHandle, interpreterHandle);
+      }
     }
-    this.inputTensors = new Tensor[getInputCount(interpreterHandle)];
-    this.outputTensors = new Tensor[getOutputCount(interpreterHandle)];
-    if (options.allowFp16PrecisionForFp32 != null) {
-      allowFp16PrecisionForFp32(
-          interpreterHandle, options.allowFp16PrecisionForFp32.booleanValue());
-    }
-    if (options.allowBufferHandleOutput != null) {
-      allowBufferHandleOutput(interpreterHandle, options.allowBufferHandleOutput.booleanValue());
-    }
-    applyDelegates(options);
-
-    // Simply use "-1" to represent the default mode.
-    int applyXNNPACKMode = -1;
-    if (options.useXNNPACK != null) {
-      applyXNNPACKMode = options.useXNNPACK.booleanValue() ? 1 : 0;
-    }
-
-    // TODO(b/171856982): uncomment the following when applying XNNPACK delegate by default is
-    // enabled for C++ TfLite library on Android platform.
-    if (applyXNNPACKMode == 1 /*|| applyXNNPACKMode == -1*/) {
-      useXNNPACK(interpreterHandle, errorHandle, applyXNNPACKMode, options.numThreads);
-    }
-    allocateTensors(interpreterHandle, errorHandle);
-    this.isMemoryAllocated = true;
   }
 
   /** Releases resources associated with this {@code NativeInterpreterWrapper}. */