Update concrete function examples to provide the associated trackable objects

Providing trackable objects is now recommended for users who want to use
the newer SavedModel importer in MLIR.
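
For example, a minimal sketch of the recommended pattern (the `Model`
class and input signature below are illustrative, not part of this change):

```python
import tensorflow as tf

class Model(tf.Module):

  @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)])
  def __call__(self, x):
    return x + x

model = Model()
func = model.__call__.get_concrete_function()

# Pass the trackable object that owns the ConcreteFunction as the second
# argument so the newer MLIR SavedModel importer can resolve its captures.
converter = tf.lite.TFLiteConverter.from_concrete_functions([func], model)
tflite_model = converter.convert()
```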

PiperOrigin-RevId: 386160000
Change-Id: Iae2898f6e059f84f9eff845c9901bd7acf898c11
diff --git a/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py b/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py
index 9858264..0638a45 100644
--- a/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py
+++ b/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py
@@ -67,7 +67,7 @@
     return y + y
 
   func = model.get_concrete_function()
-  converter = tf.lite.TFLiteConverter.from_concrete_functions([func])
+  converter = tf.lite.TFLiteConverter.from_concrete_functions([func], model)
   converter.convert()
 
 
diff --git a/tensorflow/lite/python/analyzer_test.py b/tensorflow/lite/python/analyzer_test.py
index 902eb30..b2a722d 100644
--- a/tensorflow/lite/python/analyzer_test.py
+++ b/tensorflow/lite/python/analyzer_test.py
@@ -72,7 +72,7 @@
       return x + tf.cos(x)
 
     converter = tf.lite.TFLiteConverter.from_concrete_functions(
-        [func.get_concrete_function()])
+        [func.get_concrete_function()], func)
     fb_model = converter.convert()
     txt = analyzer.ModelAnalyzer.analyze(
         model_content=fb_model, result_format='txt')
@@ -88,7 +88,7 @@
       return x + tf.cos(x)
 
     converter = tf.lite.TFLiteConverter.from_concrete_functions(
-        [func.get_concrete_function()])
+        [func.get_concrete_function()], func)
     fb_model = converter.convert()
     mlir = analyzer.ModelAnalyzer.analyze(
         model_content=fb_model, result_format='mlir')
@@ -108,7 +108,7 @@
       return x + tf.cos(x)
 
     converter = tf.lite.TFLiteConverter.from_concrete_functions(
-        [func.get_concrete_function()])
+        [func.get_concrete_function()], func)
     fb_model = converter.convert()
     html = analyzer.ModelAnalyzer.analyze(
         model_content=fb_model, result_format='html')
diff --git a/tensorflow/lite/python/authoring/authoring.py b/tensorflow/lite/python/authoring/authoring.py
index 3e43ab9..4e55880 100644
--- a/tensorflow/lite/python/authoring/authoring.py
+++ b/tensorflow/lite/python/authoring/authoring.py
@@ -119,9 +119,10 @@
           file=sys.stderr)
 
     if not self._verified:
-      concrete_func = self._get_func().get_concrete_function(*args, **kwargs)
+      model = self._get_func()
+      concrete_func = model.get_concrete_function(*args, **kwargs)
       converter = lite.TFLiteConverterV2.from_concrete_functions(
-          [concrete_func])
+          [concrete_func], model)
       # Set provided converter parameters
       if self._converter_target_spec is not None:
         converter.target_spec = self._converter_target_spec
diff --git a/tensorflow/lite/python/authoring/authoring_test.py b/tensorflow/lite/python/authoring/authoring_test.py
index 4ac7745..2c9ad61 100644
--- a/tensorflow/lite/python/authoring/authoring_test.py
+++ b/tensorflow/lite/python/authoring/authoring_test.py
@@ -148,7 +148,7 @@
 
     # Check if the decorator works with get_concrete_function method.
     converter = tf.lite.TFLiteConverter.from_concrete_functions(
-        [func.get_concrete_function()])
+        [func.get_concrete_function()], func)
     converter.convert()
 
   def test_decorated_class_method_type(self):
@@ -170,7 +170,7 @@
 
     # Check if the decorator works with get_concrete_function method.
     converter = tf.lite.TFLiteConverter.from_concrete_functions(
-        [m.eval.get_concrete_function()])
+        [m.eval.get_concrete_function()], m)
     converter.convert()
 
   def test_simple_cosh_multiple(self):
diff --git a/tensorflow/lite/python/lite.py b/tensorflow/lite/python/lite.py
index 4f12874..9055504 100644
--- a/tensorflow/lite/python/lite.py
+++ b/tensorflow/lite/python/lite.py
@@ -1203,6 +1203,14 @@
       input_tensors: List of input tensors.
       output_tensors: List of output tensors.
     """
+    if len(self._funcs) == 0:  # pylint: disable=g-explicit-length-test
+      raise ValueError("No ConcreteFunction is specified.")
+
+    if len(self._funcs) > 1:
+      raise ValueError("This converter can only convert a single "
+                       "ConcreteFunction. Converting multiple functions is "
+                       "under development.")
+
     func = self._funcs[0]
 
     if not self.experimental_lower_to_saved_model:
@@ -1337,7 +1345,7 @@
     tflite_model = converter.convert()
 
     # Converting ConcreteFunctions to a TensorFlow Lite model.
-    converter = tf.lite.TFLiteConverter.from_concrete_functions([func])
+    converter = tf.lite.TFLiteConverter.from_concrete_functions([func], model)
     tflite_model = converter.convert()
     ```
   """
diff --git a/tensorflow/lite/python/lite_flex_test.py b/tensorflow/lite/python/lite_flex_test.py
index 2e12cad..edfb887 100644
--- a/tensorflow/lite/python/lite_flex_test.py
+++ b/tensorflow/lite/python/lite_flex_test.py
@@ -140,7 +140,8 @@
     concrete_func = root.f.get_concrete_function(input_data)
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               root)
     converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
     converter.experimental_new_converter = enable_mlir
     tflite_model = converter.convert()
diff --git a/tensorflow/lite/python/lite_v2_test.py b/tensorflow/lite/python/lite_v2_test.py
index 095d05c..d564523 100644
--- a/tensorflow/lite/python/lite_v2_test.py
+++ b/tensorflow/lite/python/lite_v2_test.py
@@ -69,7 +69,7 @@
   def testTypeInvalid(self):
     root = self._getSimpleVariableModel()
     with self.assertRaises(ValueError) as error:
-      _ = lite.TFLiteConverterV2.from_concrete_functions([root.f])
+      _ = lite.TFLiteConverterV2.from_concrete_functions([root.f], root)
     self.assertIn('call get_concrete_function', str(error.exception))
 
   @parameterized.named_parameters(
@@ -82,7 +82,8 @@
     concrete_func = root.f.get_concrete_function(input_data)
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               root)
     converter.experimental_new_converter = enable_mlir_converter
     tflite_model = converter.convert()
 
@@ -101,7 +102,8 @@
     concrete_func = root.f.get_concrete_function(input_data)
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               root)
     with self.assertRaises(ValueError) as error:
       converter.inference_input_type = inference_input_output_type
       converter.inference_output_type = inference_input_output_type
@@ -117,7 +119,8 @@
     concrete_func = root.f.get_concrete_function(input_data)
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               root)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -133,7 +136,8 @@
     concrete_func = root.add.get_concrete_function(input_data)
 
     # Convert model and ensure model is not None.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               root)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -151,7 +155,7 @@
 
     # Try converting multiple functions.
     converter = lite.TFLiteConverterV2.from_concrete_functions(
-        [add_func, sub_func])
+        [add_func, sub_func], root)
     with self.assertRaises(ValueError) as error:
       _ = converter.convert()
     self.assertIn('can only convert a single ConcreteFunction',
@@ -176,21 +180,23 @@
 
     root.f = func
     to_save = root.f.get_concrete_function()
-    return (to_save, calibration_gen)
+    return (root, to_save, calibration_gen)
 
   @parameterized.named_parameters(
       ('EnableMlirQuantizer', True),  # enable mlir quantizer
       ('DisableMlirQuantizer', False))  # disable mlir quantizer
   def testPostTrainingCalibrateAndQuantize(self, mlir_quantizer):
-    func, calibration_gen = self._getIntegerQuantizeModel()
+    root, func, calibration_gen = self._getIntegerQuantizeModel()
 
     # Convert float model.
-    float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    float_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                     root)
     float_tflite_model = float_converter.convert()
     self.assertIsNotNone(float_tflite_model)
 
     # Convert quantized model.
-    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                         root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_converter.representative_dataset = calibration_gen
     quantized_converter.experimental_new_quantizer = mlir_quantizer
@@ -216,15 +222,16 @@
   @test_util.run_v2_only
   def testInvalidPostTrainingDynamicRangeQuantization(
       self, inference_input_output_type):
-    func, _ = self._getIntegerQuantizeModel()
+    root, func, _ = self._getIntegerQuantizeModel()
 
     # Convert float model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)
     tflite_model = converter.convert()
     self.assertTrue(tflite_model)
 
     # Convert quantized model.
-    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                         root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     with self.assertRaises(ValueError) as error:
       quantized_converter.inference_input_type = inference_input_output_type
@@ -247,15 +254,16 @@
       ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))
   def testIntegerQuantization(self, is_int_only, is_int16_quantize,
                               inference_input_output_type):
-    func, calibration_gen = self._getIntegerQuantizeModel()
+    root, func, calibration_gen = self._getIntegerQuantizeModel()
 
     # Convert float model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)
     tflite_model = converter.convert()
     self.assertTrue(tflite_model)
 
     # Convert quantized model.
-    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                         root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_converter.representative_dataset = calibration_gen
     if is_int_only:
@@ -298,10 +306,11 @@
       ('_INT16Quantize_INT8InputOutput', True, dtypes.int8))
   def testInvalidIntegerQuantization(self, is_int16_quantize,
                                      inference_input_output_type):
-    func, calibration_gen = self._getIntegerQuantizeModel()
+    root, func, calibration_gen = self._getIntegerQuantizeModel()
 
     # Convert quantized model.
-    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                         root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_converter.representative_dataset = calibration_gen
     if is_int16_quantize:
@@ -319,14 +328,15 @@
         "must be in ['tf.float32', 'tf.int16'].", str(error.exception))
 
   def testCalibrateAndQuantizeBuiltinInt16(self):
-    func, calibration_gen = self._getIntegerQuantizeModel()
+    root, func, calibration_gen = self._getIntegerQuantizeModel()
 
     # Convert float model.
-    float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    float_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                     root)
     float_tflite_model = float_converter.convert()
     self.assertIsNotNone(float_tflite_model)
 
-    converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)
     # TODO(b/156309549): We should add INT16 to the builtin types.
     converter.optimizations = [lite.Optimize.DEFAULT]
     converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]
@@ -506,9 +516,10 @@
   @test_util.run_v2_only
   def testNewQuantizer(self):
     """Test the model quantized by the new converter."""
-    func, calibration_gen = self._getIntegerQuantizeModel()
+    root, func, calibration_gen = self._getIntegerQuantizeModel()
 
-    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                         root)
     quantized_converter.target_spec.supported_ops = [
         lite.OpsSet.TFLITE_BUILTINS_INT8
     ]
@@ -558,7 +569,8 @@
     concrete_func = root.func.get_concrete_function()
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               root)
     converter.experimental_new_converter = enable_mlir_converter
     tflite_model = converter.convert()
 
@@ -577,7 +589,8 @@
     concrete_func = root.f.get_concrete_function(input_data)
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               root)
     converter.convert()
     self._assertValidDebugInfo(converter._debug_info)
 
@@ -609,7 +622,7 @@
         ]
 
     root.f = func
-    return (root.f.get_concrete_function(), calibration_gen)
+    return (root, root.f.get_concrete_function(), calibration_gen)
 
   @parameterized.named_parameters(
       ('_Default', False, False, dtypes.float32),
@@ -625,10 +638,10 @@
   @test_util.run_v2_only
   def testIntegerQuantizationWithFlexOp(self, is_int_only, is_int16_quantize,
                                         inference_input_output_type):
-    func, calibration_gen = self._getIntegerQuantizationModelWithFlexOp()
+    root, func, calibration_gen = self._getIntegerQuantizationModelWithFlexOp()
 
     quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
-        [func])
+        [func], root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_converter.representative_dataset = calibration_gen
     if is_int_only:
@@ -697,7 +710,7 @@
         ]
 
     root.f = func
-    return (root.f.get_concrete_function(), calibration_gen)
+    return (root, root.f.get_concrete_function(), calibration_gen)
 
   @parameterized.named_parameters(
       ('_INT8InputOutput', False, False, dtypes.int8),
@@ -714,10 +727,11 @@
                                                 is_int16_quantize,
                                                 inference_input_output_type,
                                                 enable_mlir_quantizer=False):
-    func, calib_gen = self._getIntegerQuantizationModelWithUnsupportedOps()
+    root, func, calib_gen = self._getIntegerQuantizationModelWithUnsupportedOps(
+    )
 
     quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
-        [func])
+        [func], root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_converter.representative_dataset = calib_gen
     if is_int_only:
@@ -758,12 +772,12 @@
     interpreter.allocate_tensors()
     input_details = interpreter.get_input_details()
     self.assertLen(input_details, 2)
-    self.assertEqual(input_details[0]['dtype'], expected_ceil_dtype)
-    self.assertEqual(input_details[1]['dtype'], expected_dtype)
+    self.assertEqual(input_details[0]['dtype'], expected_dtype)
+    self.assertEqual(input_details[1]['dtype'], expected_ceil_dtype)
     output_details = interpreter.get_output_details()
     self.assertLen(output_details, 2)
-    self.assertEqual(output_details[0]['dtype'], expected_ceil_dtype)
-    self.assertEqual(output_details[1]['dtype'], expected_dtype)
+    self.assertEqual(output_details[0]['dtype'], expected_dtype)
+    self.assertEqual(output_details[1]['dtype'], expected_ceil_dtype)
 
   @parameterized.named_parameters(
       ('_BlocklistedNone', None, None),
@@ -773,8 +787,9 @@
   def testNewQuantizerBlocklistingArgs(self, blocklisted_ops,
                                        blocklisted_nodes):
     """Test the model quantized by the new converter and blocklisted options."""
-    func, calibration_gen = self._getIntegerQuantizeModel()
-    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    root, func, calibration_gen = self._getIntegerQuantizeModel()
+    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                         root)
     quantized_converter.target_spec.supported_ops = [
         lite.OpsSet.TFLITE_BUILTINS_INT8
     ]
@@ -782,6 +797,9 @@
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_converter.experimental_new_quantizer = True
     quantized_converter._experimental_calibrate_only = True
+    if blocklisted_nodes:
+      # TODO(b/191205988): Explicitly disable saved model lowering.
+      quantized_converter.experimental_lower_to_saved_model = False
     calibrated = quantized_converter.convert()
     quantized_tflite_model = mlir_quantize(calibrated,
                                            blocklisted_ops=blocklisted_ops,
@@ -803,9 +821,10 @@
   @test_util.run_v2_only
   def testNewQuantizerNumericVerificationDebugMode(self, whole_model_verify):
     """Test the model quantized by the new converter with numeric verify ops."""
-    func, calibration_gen = self._getIntegerQuantizeModel()
+    root, func, calibration_gen = self._getIntegerQuantizeModel()
 
-    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                         root)
     quantized_converter.target_spec.supported_ops = [
         lite.OpsSet.TFLITE_BUILTINS_INT8
     ]
@@ -891,9 +910,9 @@
                                         enable_mlir_quantizer=False):
     k_conv_name = 'Conv2D1'
     k_num_filters = 16
-    func, calib_gen = self._getIntegerQuantizeModel()
+    root, func, calib_gen = self._getIntegerQuantizeModel()
     quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
-        [func])
+        [func], root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_converter.representative_dataset = calib_gen
     quantized_converter.target_spec.supported_ops = [
@@ -930,7 +949,8 @@
       return resize1 + resize2
 
     concrete_func = custom_resize.get_concrete_function()
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               custom_resize)
     tflite_model = converter.convert()
     model_object = schema_fb.Model.GetRootAsModel(tflite_model, 0)
     model = schema_fb.ModelT.InitFromObj(model_object)
@@ -1890,7 +1910,8 @@
     concrete_func = model.get_concrete_function()
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -1928,7 +1949,8 @@
     concrete_func = model.get_concrete_function()
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -1957,7 +1979,8 @@
     concrete_func = model.get_concrete_function()
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -1980,7 +2003,8 @@
     concrete_func = model.get_concrete_function()
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -2096,7 +2120,8 @@
     concrete_func = root.f.get_concrete_function(input_data)
 
     # Convert model.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               root)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -2127,7 +2152,8 @@
 
     concrete_func = model.get_concrete_function()
 
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -2165,17 +2191,17 @@
         for _ in range(5):
           yield [np.random.uniform(-1, 1, size=(batch, 33)).astype(np.float32)]
 
-    return concrete_func, calibration_gen
+    return root, concrete_func, calibration_gen
 
   @test_util.run_v2_only
   def testMatMulQuantize(self):
-    concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()
+    root, concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()
     float_converter = lite.TFLiteConverterV2.from_concrete_functions(
-        [concrete_func])
+        [concrete_func], root)
     float_tflite_model = float_converter.convert()
 
     quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
-        [concrete_func])
+        [concrete_func], root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_tflite_model = quantized_converter.convert()
 
@@ -2192,14 +2218,14 @@
 
   @test_util.run_v2_only
   def testMatMulCalibrateAndQuantize(self):
-    concrete_func, calibration_gen = (
+    root, concrete_func, calibration_gen = (
         self._getIntegerQuantizeModelWithUnknownShapes())
     float_converter = lite.TFLiteConverterV2.from_concrete_functions(
-        [concrete_func])
+        [concrete_func], root)
     float_tflite_model = float_converter.convert()
 
     quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
-        [concrete_func])
+        [concrete_func], root)
     quantized_converter.optimizations = [lite.Optimize.DEFAULT]
     quantized_converter.representative_dataset = calibration_gen
     quantized_tflite_model = quantized_converter.convert()
@@ -2230,7 +2256,8 @@
 
     concrete_func = model.get_concrete_function()
 
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     tflite_model = converter.convert()
 
     # Check values from converted model.
@@ -2252,7 +2279,8 @@
 
     # Test invalid shape. None after 1st dimension. Run with TOCO in order to
     # invoke shape checking code.
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     converter.experimental_new_converter = False
     with self.assertRaises(ValueError) as error:
       converter.convert()
@@ -2279,7 +2307,8 @@
 
     concrete_func = model.get_concrete_function()
 
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     converter.target_spec.supported_ops = [
         tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
     ]
@@ -2799,7 +2828,8 @@
 
     input_data = np.array(2.0, np.float32)
     concrete_func = f.get_concrete_function(input_data)
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               f)
     tflite_model = converter.convert()
     interpreter = Interpreter(
         model_content=tflite_model,
@@ -2852,7 +2882,8 @@
       return output
 
     concrete_func = model.get_concrete_function()
-    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
+    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
+                                                               model)
     converter.target_spec.supported_ops = [
         tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
     ]
diff --git a/tensorflow/lite/python/metrics_nonportable_test.py b/tensorflow/lite/python/metrics_nonportable_test.py
index 1312b0e..131c705 100644
--- a/tensorflow/lite/python/metrics_nonportable_test.py
+++ b/tensorflow/lite/python/metrics_nonportable_test.py
@@ -197,12 +197,13 @@
 
     root.f = func
     to_save = root.f.get_concrete_function()
-    return (to_save, calibration_gen)
+    return (root, to_save, calibration_gen)
 
   def test_conversion_from_frozen_graph_v2(self):
-    func, calibration_gen = self._getIntegerQuantizeModel()
+    model, func, calibration_gen = self._getIntegerQuantizeModel()
 
-    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
+                                                                         model)
     mock_metrics = mock.create_autospec(
         metrics.TFLiteConverterMetrics, instance=True)
     quantized_converter._tflite_metrics = mock_metrics
diff --git a/tensorflow/lite/python/metrics_wrapper/metrics_wrapper_test.py b/tensorflow/lite/python/metrics_wrapper/metrics_wrapper_test.py
index a7166d9..41a0b9b 100644
--- a/tensorflow/lite/python/metrics_wrapper/metrics_wrapper_test.py
+++ b/tensorflow/lite/python/metrics_wrapper/metrics_wrapper_test.py
@@ -37,7 +37,7 @@
       return tf.cosh(x)
 
     converter = lite.TFLiteConverterV2.from_concrete_functions(
-        [func.get_concrete_function()])
+        [func.get_concrete_function()], func)
     try:
       converter.convert()
     except ConverterError as err:
diff --git a/tensorflow/lite/tools/optimize/debugging/python/debugger_test.py b/tensorflow/lite/tools/optimize/debugging/python/debugger_test.py
index f9573b7..e135c9c 100644
--- a/tensorflow/lite/tools/optimize/debugging/python/debugger_test.py
+++ b/tensorflow/lite/tools/optimize/debugging/python/debugger_test.py
@@ -54,7 +54,7 @@
 
   root.f = func
   to_save = root.f.get_concrete_function()
-  return to_save
+  return (root, to_save)
 
 
 def _calibration_gen():
@@ -62,18 +62,23 @@
     yield [np.arange(9).reshape((1, 3, 3, 1)).astype(np.float32) * i]
 
 
-def _convert_model(func):
+def _convert_model(model, func):
   """Converts TF model to TFLite float model."""
-  converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+  converter = lite.TFLiteConverterV2.from_concrete_functions([func], model)
+  # TODO(b/191205988): Explicitly disable saved model lowering in conversion.
+  converter.experimental_lower_to_saved_model = False
   return converter.convert()
 
 
-def _quantize_converter(func, calibration_gen, debug=True):
+def _quantize_converter(model, func, calibration_gen, debug=True):
   """Returns a converter appropriate for the function and debug configs."""
-  converter = lite.TFLiteConverterV2.from_concrete_functions([func])
+  converter = lite.TFLiteConverterV2.from_concrete_functions([func], model)
   converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]
   converter.representative_dataset = calibration_gen
 
+  # TODO(b/191205988): Explicitly disable saved model lowering in conversion.
+  converter.experimental_lower_to_saved_model = False
+
   # Create a TFLite model with new quantizer and numeric verify ops.
   converter.optimizations = [lite.Optimize.DEFAULT]
   converter.experimental_new_quantizer = True
@@ -82,9 +87,13 @@
   return converter
 
 
-def _quantize_model(func, calibration_gen, quantized_io=False, debug=True):
+def _quantize_model(model,
+                    func,
+                    calibration_gen,
+                    quantized_io=False,
+                    debug=True):
   """Quantizes model, in debug or normal mode."""
-  converter = _quantize_converter(func, calibration_gen, debug)
+  converter = _quantize_converter(model, func, calibration_gen, debug)
   if debug:
     calibrated = converter.convert()
     return convert.mlir_quantize(
@@ -135,12 +144,12 @@
   @classmethod
   def setUpClass(cls):
     super().setUpClass()
-    cls.tf_model = _get_model()
-    cls.float_model = _convert_model(cls.tf_model)
+    cls.tf_model_root, cls.tf_model = _get_model()
+    cls.float_model = _convert_model(cls.tf_model_root, cls.tf_model)
     cls.debug_model_float = _quantize_model(
-        cls.tf_model, _calibration_gen, quantized_io=False)
+        cls.tf_model_root, cls.tf_model, _calibration_gen, quantized_io=False)
     cls.debug_model_int8 = _quantize_model(
-        cls.tf_model, _calibration_gen, quantized_io=True)
+        cls.tf_model_root, cls.tf_model, _calibration_gen, quantized_io=True)
 
   @parameterized.named_parameters(
       ('float_io', False, False),
@@ -164,7 +173,8 @@
     else:
       options.fully_quantize = quantized_io
       quant_debugger = debugger.QuantizationDebugger(
-          converter=_quantize_converter(self.tf_model, _calibration_gen),
+          converter=_quantize_converter(self.tf_model_root, self.tf_model,
+                                        _calibration_gen),
           debug_dataset=_calibration_gen,
           debug_options=options)
 
@@ -288,7 +298,10 @@
   @test_util.run_v2_only
   def test_non_debug_model_raises_ValueError(self):
     normal_quant_model = _quantize_model(
-        QuantizationDebuggerTest.tf_model, _calibration_gen, debug=False)
+        QuantizationDebuggerTest.tf_model_root,
+        QuantizationDebuggerTest.tf_model,
+        _calibration_gen,
+        debug=False)
 
     with self.assertRaisesRegex(
         ValueError, 'Please check if the quantized model is in debug mode'):
@@ -331,7 +344,8 @@
         layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))})
     options.fully_quantize = quantized_io
     quant_debugger = debugger.QuantizationDebugger(
-        converter=_quantize_converter(self.tf_model, _calibration_gen),
+        converter=_quantize_converter(self.tf_model_root, self.tf_model,
+                                      _calibration_gen),
         debug_dataset=_calibration_gen,
         debug_options=options)
     options.denylisted_ops = ['CONV_2D']
@@ -349,7 +363,8 @@
     options.fully_quantize = quantized_io
     options.fully_quantize = quantized_io
     quant_debugger = debugger.QuantizationDebugger(
-        converter=_quantize_converter(self.tf_model, _calibration_gen),
+        converter=_quantize_converter(self.tf_model_root, self.tf_model,
+                                      _calibration_gen),
         debug_dataset=_calibration_gen,
         debug_options=options)
     options.denylisted_nodes = ['Identity']