Automated rollback of commit 37123e9e82bf34002b656753970fde832c2708af

PiperOrigin-RevId: 295709352
Change-Id: I57f6223335cec07ddb701fa369be31452a72c34d
diff --git a/tensorflow/lite/micro/micro_interpreter.cc b/tensorflow/lite/micro/micro_interpreter.cc
index 31e7569..2326c2d 100644
--- a/tensorflow/lite/micro/micro_interpreter.cc
+++ b/tensorflow/lite/micro/micro_interpreter.cc
@@ -52,8 +52,7 @@
       error_reporter_(error_reporter),
       allocator_(&context_, model_, tensor_arena, tensor_arena_size,
                  error_reporter_),
-      tensors_allocated_(false),
-      tensors_prepared_(false) {
+      tensors_allocated_(false) {
   const flatbuffers::Vector<flatbuffers::Offset<SubGraph>>* subgraphs =
       model->subgraphs();
   if (subgraphs->size() != 1) {
@@ -86,6 +85,21 @@
   initialization_status_ = kTfLiteOk;
 }
 
+MicroInterpreter::~MicroInterpreter() {
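+  // AllocateTensors() may have handed each op per-node user data via init();
+  // give every op that registered a free() hook a chance to release it.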
+  if (node_and_registrations_ != nullptr) {
+    for (size_t i = 0; i < operators_->size(); ++i) {
+      TfLiteNode* node = &(node_and_registrations_[i].node);
+      const TfLiteRegistration* registration =
+          node_and_registrations_[i].registration;
+      // The registration is allocated outside the interpreter, so double-check
+      // that it is not nullptr before calling free.
+      if (registration != nullptr && registration->free != nullptr) {
+        registration->free(&context_, node->user_data);
+      }
+    }
+  }
+}
+
 void MicroInterpreter::CorrectTensorEndianness(TfLiteTensor* tensorCorr) {
   int32_t tensorSize = 1;
   for (int d = 0; d < tensorCorr->dims->size; ++d)
@@ -128,8 +142,41 @@
                                    op_resolver_, &node_and_registrations_));
   TF_LITE_ENSURE_OK(&context_, allocator_.FinishTensorAllocation());
 
-  tensors_allocated_ = true;
-  return kTfLiteOk;
+  // Initialize all ops by calling their init() hooks. Custom ops receive
+  // their custom_initial_data; builtin ops receive builtin_data with size 0.
+  for (size_t i = 0; i < operators_->size(); ++i) {
+    auto* node = &(node_and_registrations_[i].node);
+    auto* registration = node_and_registrations_[i].registration;
+    size_t init_data_size;
+    const char* init_data;
+    if (registration->builtin_code == BuiltinOperator_CUSTOM) {
+      init_data = reinterpret_cast<const char*>(node->custom_initial_data);
+      init_data_size = node->custom_initial_data_size;
+    } else {
+      init_data = reinterpret_cast<const char*>(node->builtin_data);
+      init_data_size = 0;
+    }
+    if (registration->init) {
+      node->user_data =
+          registration->init(&context_, init_data, init_data_size);
+    }
+  }
+
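+  // With every op initialized, run each op's prepare() so it can validate its
+  // inputs and set up any per-op state before the first Invoke().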
+  for (size_t i = 0; i < operators_->size(); ++i) {
+    auto* node = &(node_and_registrations_[i].node);
+    auto* registration = node_and_registrations_[i].registration;
+    if (registration->prepare) {
+      TfLiteStatus prepare_status = registration->prepare(&context_, node);
+      if (prepare_status != kTfLiteOk) {
+        // Cast i to int so the argument matches the %d format specifier.
+        error_reporter_->Report(
+            "Node %s (number %d) failed to prepare with status %d",
+            OpNameFromRegistration(registration), static_cast<int>(i),
+            prepare_status);
+        return kTfLiteError;
+      }
+    }
+  }
+
+  tensors_allocated_ = true;
+  return kTfLiteOk;
 }
 
 TfLiteStatus MicroInterpreter::Invoke() {
@@ -144,45 +191,6 @@
     AllocateTensors();
   }
 
-  // Init method is not yet implemented.
-  for (size_t i = 0; i < operators_->size(); ++i) {
-    auto* node = &(node_and_registrations_[i].node);
-    auto* registration = node_and_registrations_[i].registration;
-    size_t init_data_size;
-    const char* init_data;
-    if (registration->builtin_code == BuiltinOperator_CUSTOM) {
-      init_data = reinterpret_cast<const char*>(node->custom_initial_data);
-      init_data_size = node->custom_initial_data_size;
-    } else {
-      init_data = reinterpret_cast<const char*>(node->builtin_data);
-      init_data_size = 0;
-    }
-    if (!tensors_prepared_ && registration->init) {
-      node->user_data =
-          registration->init(&context_, init_data, init_data_size);
-    }
-  }
-
-  if (!tensors_prepared_) {
-    for (size_t i = 0; i < operators_->size(); ++i) {
-      auto* node = &(node_and_registrations_[i].node);
-      auto* registration = node_and_registrations_[i].registration;
-      if (registration->prepare) {
-        TfLiteStatus prepare_status = registration->prepare(&context_, node);
-        if (prepare_status != kTfLiteOk) {
-          error_reporter_->Report(
-              "Node %s (number %d) failed to prepare with status %d",
-              OpNameFromRegistration(registration), i, prepare_status);
-          return kTfLiteError;
-        }
-      }
-    }
-#ifdef TF_LITE_MICRO_TENSORS_PREPARED
-    // TODO(b/148085107): Turn this value on by default.
-    tensors_prepared_ = true;
-#endif
-  }
-
   for (size_t i = 0; i < operators_->size(); ++i) {
     auto* node = &(node_and_registrations_[i].node);
     auto* registration = node_and_registrations_[i].registration;
@@ -197,16 +205,6 @@
       }
     }
   }
-
-  // This is actually a no-op.
-  // TODO(wangtz): Consider removing this code to slightly reduce binary size.
-  for (size_t i = 0; i < operators_->size(); ++i) {
-    auto* node = &(node_and_registrations_[i].node);
-    auto* registration = node_and_registrations_[i].registration;
-    if (registration->free) {
-      registration->free(&context_, node->user_data);
-    }
-  }
   return kTfLiteOk;
 }
 
diff --git a/tensorflow/lite/micro/micro_interpreter.h b/tensorflow/lite/micro/micro_interpreter.h
index 941960a..4d02769 100644
--- a/tensorflow/lite/micro/micro_interpreter.h
+++ b/tensorflow/lite/micro/micro_interpreter.h
@@ -39,6 +39,8 @@
                    uint8_t* tensor_arena, size_t tensor_arena_size,
                    ErrorReporter* error_reporter);
 
+  // Invokes the free() hook of every op that registered one, releasing any
+  // per-op user data allocated in init().
+  ~MicroInterpreter();
+
   // Runs through the model and allocates all necessary input, output and
   // intermediate tensors.
   TfLiteStatus AllocateTensors();
@@ -109,7 +111,7 @@
   template <class T>
   void CorrectTensorDataEndianness(T* data, int32_t size);
 
-  NodeAndRegistration* node_and_registrations_;
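+  // Stays nullptr until AllocateTensors() populates it; the destructor checks
+  // this before walking the ops.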
+  NodeAndRegistration* node_and_registrations_ = nullptr;
 
   const Model* model_;
   const OpResolver& op_resolver_;
@@ -117,7 +119,6 @@
   TfLiteContext context_ = {};
   MicroAllocator allocator_;
   bool tensors_allocated_;
-  bool tensors_prepared_;
 
   TfLiteStatus initialization_status_;
   const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;
diff --git a/tensorflow/lite/micro/micro_interpreter_test.cc b/tensorflow/lite/micro/micro_interpreter_test.cc
index 6d0deca..5ca2c3a 100644
--- a/tensorflow/lite/micro/micro_interpreter_test.cc
+++ b/tensorflow/lite/micro/micro_interpreter_test.cc
@@ -22,6 +22,7 @@
 
 namespace tflite {
 namespace {
+
 void* MockInit(TfLiteContext* context, const char* buffer, size_t length) {
   // We don't support delegate in TFL micro. This is a weak check to test if
   // context struct being zero-initialized.
@@ -31,9 +32,8 @@
   return nullptr;
 }
 
-void MockFree(TfLiteContext* context, void* buffer) {
-  // Do nothing.
-}
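+// Set by MockFree() so the test can verify that the interpreter destructor
+// invoked the op's free() hook.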
+bool freed = false;
+void MockFree(TfLiteContext* context, void* buffer) { freed = true; }
 
 TfLiteStatus MockPrepare(TfLiteContext* context, TfLiteNode* node) {
   return kTfLiteOk;
@@ -75,49 +75,56 @@
 TF_LITE_MICRO_TESTS_BEGIN
 
 TF_LITE_MICRO_TEST(TestInterpreter) {
+  tflite::freed = false;
   const tflite::Model* model = tflite::testing::GetSimpleMockModel();
   TF_LITE_MICRO_EXPECT_NE(nullptr, model);
   tflite::MockOpResolver mock_resolver;
   constexpr size_t allocator_buffer_size = 1024;
   uint8_t allocator_buffer[allocator_buffer_size];
-  tflite::MicroInterpreter interpreter(model, mock_resolver, allocator_buffer,
-                                       allocator_buffer_size,
-                                       micro_test::reporter);
-  TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
-  TF_LITE_MICRO_EXPECT_EQ(1, interpreter.inputs_size());
-  TF_LITE_MICRO_EXPECT_EQ(2, interpreter.outputs_size());
 
-  TfLiteTensor* input = interpreter.input(0);
-  TF_LITE_MICRO_EXPECT_NE(nullptr, input);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input->type);
-  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->size);
-  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
-  TF_LITE_MICRO_EXPECT_EQ(4, input->bytes);
-  TF_LITE_MICRO_EXPECT_NE(nullptr, input->data.i32);
-  input->data.i32[0] = 21;
+  // Create a new scope so that we can test the destructor.
+  {
+    tflite::MicroInterpreter interpreter(model, mock_resolver, allocator_buffer,
+                                         allocator_buffer_size,
+                                         micro_test::reporter);
+    TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+    TF_LITE_MICRO_EXPECT_EQ(1, interpreter.inputs_size());
+    TF_LITE_MICRO_EXPECT_EQ(2, interpreter.outputs_size());
 
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke());
+    TfLiteTensor* input = interpreter.input(0);
+    TF_LITE_MICRO_EXPECT_NE(nullptr, input);
+    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input->type);
+    TF_LITE_MICRO_EXPECT_EQ(1, input->dims->size);
+    TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
+    TF_LITE_MICRO_EXPECT_EQ(4, input->bytes);
+    TF_LITE_MICRO_EXPECT_NE(nullptr, input->data.i32);
+    input->data.i32[0] = 21;
 
-  TfLiteTensor* output = interpreter.output(0);
-  TF_LITE_MICRO_EXPECT_NE(nullptr, output);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type);
-  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size);
-  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
-  TF_LITE_MICRO_EXPECT_EQ(4, output->bytes);
-  TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32);
-  TF_LITE_MICRO_EXPECT_EQ(42, output->data.i32[0]);
+    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke());
 
-  output = interpreter.output(1);
-  TF_LITE_MICRO_EXPECT_NE(nullptr, output);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type);
-  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size);
-  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
-  TF_LITE_MICRO_EXPECT_EQ(4, output->bytes);
-  TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32);
-  TF_LITE_MICRO_EXPECT_EQ(42, output->data.i32[0]);
+    TfLiteTensor* output = interpreter.output(0);
+    TF_LITE_MICRO_EXPECT_NE(nullptr, output);
+    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type);
+    TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size);
+    TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
+    TF_LITE_MICRO_EXPECT_EQ(4, output->bytes);
+    TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32);
+    TF_LITE_MICRO_EXPECT_EQ(42, output->data.i32[0]);
 
-  // Just to make sure that this method works.
-  tflite::PrintInterpreterState(&interpreter);
+    output = interpreter.output(1);
+    TF_LITE_MICRO_EXPECT_NE(nullptr, output);
+    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type);
+    TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size);
+    TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
+    TF_LITE_MICRO_EXPECT_EQ(4, output->bytes);
+    TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32);
+    TF_LITE_MICRO_EXPECT_EQ(42, output->data.i32[0]);
+
+    // Smoke-test PrintInterpreterState() on a fully set-up interpreter.
+    tflite::PrintInterpreterState(&interpreter);
+  }
+
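+  // The interpreter went out of scope above, so its destructor must have
+  // called MockFree() by now.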
+  TF_LITE_MICRO_EXPECT_EQ(true, tflite::freed);
 }
 
 TF_LITE_MICRO_TEST(TestVariableTensorReset) {