Changed TOSA common legalizations to use llvm::Optional<>

- Changed TOSA common legalizations to use llvm::Optional<> with Value or
  ValueRange, as appropriate
- Removed legalizations return type macros
- Added an affirmative check for the "NONE" type in
  convertFusedActivation() to avoid an ambiguous meaning for the
  nullptr return value when the return type was an Operator *
- Changed auto type to Value and RankedTensorType, where possible
- Used succinct constructor for SmallVector with an initial value,
  where possible

Signed-off-by: Jared Smolens <jared.smolens@arm.com>
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_common.cc b/tensorflow/compiler/mlir/tosa/transforms/legalize_common.cc
index 7cd3e16..084efbf 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_common.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_common.cc
@@ -21,13 +21,11 @@
 // llvm::Optional value after each call.
 
 #include "tensorflow/compiler/mlir/tosa/transforms/legalize_common.h"
-
 #include <climits>
 #include <cstddef>
 #include <cstdint>
 #include <iterator>
 #include <numeric>
-
 #include "tensorflow/compiler/mlir/tosa/transforms/legalize_utils.h"
 
 namespace mlir {
@@ -62,7 +60,8 @@
   // Sanity check 1: make sure all input tensors have the same shape
   // if input[0] has shape [A, B, C], input[1] to input[N-1] should also have
   // shape[A, B, C]
-  auto result_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
 
   // Check for ranked tensor type.
   if (!result_type) {
@@ -73,13 +72,14 @@
   // Valid axis in TF is [-rank(input), rank(input))
   // Valid axis in TOSA is [0, rank(input))
   // Plus rank(input) once if axis is negative.
-  auto input_type = op->getOperand(0).getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      op->getOperand(0).getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("PackOp: input type not ranked tensor");
     return llvm::None;
   }
 
-  auto input_rank = input_type.getShape().size();
+  int32_t input_rank = input_type.getShape().size();
   if (axis < 0) axis += input_rank;
 
   input_type = inputs[0].getType().dyn_cast<RankedTensorType>();
@@ -115,7 +115,7 @@
   // performing concat.
   if (input_tensor_rank == 0) {
     SmallVector<int64_t, 8> reshape_rank1_size1_shape{1};
-    auto reshape_rank1_size1_type =
+    RankedTensorType reshape_rank1_size1_type =
         RankedTensorType::get(ArrayRef<int64_t>(reshape_rank1_size1_shape),
                               result_type.getElementType());
     ArrayAttr shape_rank1_size1_attr =
@@ -207,7 +207,7 @@
   }
 
   concat_output_shape[concat_axis] = orig_input_dim_on_axis * 2;
-  auto concat_type = RankedTensorType::get(
+  RankedTensorType concat_type = RankedTensorType::get(
       ArrayRef<int64_t>(concat_output_shape), result_type.getElementType());
   auto a1_concat_op = rewriter.create<tosa::ConcatOp>(
       op->getLoc(), concat_type, inputs[0], inputs[1], concat_axis_attr);
@@ -228,7 +228,7 @@
   if (input_tensor_rank == 0) return a1_concat_op.getResult();
 
   // Reshape [N * A, B, C] to [N, A, B, C].
-  auto reshape_output_type = RankedTensorType::get(
+  RankedTensorType reshape_output_type = RankedTensorType::get(
       ArrayRef<int64_t>(reshape_output_shape), result_type.getElementType());
 
   auto a2_reshape_op = rewriter.create<tosa::ReshapeOp>(
@@ -237,7 +237,7 @@
   // If axis is equal to input tensor rank, then we need extra transpose
   // [N, A, B, C] to [A, B, C, N]
   if (axis == input_tensor_rank) {
-    auto a3_transpose_perm =
+    Value a3_transpose_perm =
         get1DConstTensor<tosa::ConstOp, int32_t>(rewriter, op, perm);
 
     return rewriter
@@ -253,7 +253,8 @@
 llvm::Optional<ValueRange> convertUnpackOp(PatternRewriter& rewriter,
                                            Operation* op, Value input_value,
                                            int32_t axis) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   auto input_shape = input_type.getShape();
@@ -281,7 +282,7 @@
       perm_vec.push_back(i);
     }
 
-    auto a1_transpose_perm =
+    Value a1_transpose_perm =
         get1DConstTensor<tosa::ConstOp, int32_t>(rewriter, op, perm_vec);
 
     for (int i = 0; i < input_rank; i++) {
@@ -301,7 +302,7 @@
   }
 
   // Step 2: slice [N, A, B, C] into N [A, B, C].
-  auto transposed_input_type =
+  RankedTensorType transposed_input_type =
       transposed_input_value.getType().dyn_cast<RankedTensorType>();
   if (!transposed_input_type) return llvm::None;
 
@@ -355,10 +356,12 @@
 llvm::Optional<Value> convertSelectOp(PatternRewriter& rewriter, Operation* op,
                                       Value result_value, Value condition_value,
                                       Value x_value, Value y_value) {
-  auto result_type = result_value.getType().dyn_cast<RankedTensorType>();
-  auto condition_type = condition_value.getType().dyn_cast<RankedTensorType>();
-  auto x_type = x_value.getType().dyn_cast<RankedTensorType>();
-  auto y_type = y_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType condition_type =
+      condition_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType x_type = x_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType y_type = y_value.getType().dyn_cast<RankedTensorType>();
 
   if (!result_type || !condition_type || !x_type || !y_type) {
     op->emitOpError("Select: failed ranked tensor type check");
@@ -376,10 +379,8 @@
   }
 
   // Need to reshape the condition.
-  SmallVector<int64_t, 8> new_cond_dims;
-  for (int i = 0; i < (result_type.getRank() - condition_type.getRank()); i++) {
-    new_cond_dims.push_back(1);
-  }
+  SmallVector<int64_t, 8> new_cond_dims(
+      result_type.getRank() - condition_type.getRank(), 1);
 
   for (int i = 0; i < condition_type.getRank(); i++) {
     new_cond_dims.push_back(condition_type.getShape()[i]);
@@ -402,13 +403,13 @@
 llvm::Optional<Value> convertZerosLikeOp(PatternRewriter& rewriter,
                                          Operation* op, Value result,
                                          Value input) {
-  auto result_type = result.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type = result.getType().dyn_cast<RankedTensorType>();
   if (!result_type) {
     op->emitOpError("Zeroslike: result not ranked tensor type");
     return llvm::None;
   }
 
-  auto input_type = input.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type = input.getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("Zeroslike: input not ranked tensor type");
     return llvm::None;
@@ -432,9 +433,12 @@
                                         Operation* op, Value output_val,
                                         Value input_lhs_val,
                                         Value input_rhs_val) {
-  auto input_lhs_type = input_lhs_val.getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type = input_rhs_val.getType().dyn_cast<RankedTensorType>();
-  auto output_type = output_val.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_lhs_type =
+      input_lhs_val.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_rhs_type =
+      input_rhs_val.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      output_val.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return llvm::None;
 
@@ -455,7 +459,7 @@
 
   Value output;
   if (output_is_qtype) {
-    auto rescale_type =
+    RankedTensorType rescale_type =
         RankedTensorType::get(output_type.getShape(), rewriter.getI32Type());
     auto input_lhs_qtype = input_lhs_type.getElementType()
                                .cast<mlir::quant::UniformQuantizedType>();
@@ -469,9 +473,9 @@
 
     double output_rescale_scale = in_lhs_scale * in_rhs_scale / output_scale;
 
-    auto op1_rescale_lhs = buildRescaleToInt32(
+    Value op1_rescale_lhs = buildRescaleToInt32(
         rewriter, op, input_lhs_val, 1.0f, input_lhs_qtype.getZeroPoint());
-    auto op2_rescale_rhs = buildRescaleToInt32(
+    Value op2_rescale_rhs = buildRescaleToInt32(
         rewriter, op, input_rhs_val, 1.0f, input_rhs_qtype.getZeroPoint());
     auto op3_mul_op1_op2 = rewriter.create<tosa::MulOp>(
         op->getLoc(), rescale_type, op1_rescale_lhs, op2_rescale_rhs, 0);
@@ -492,14 +496,14 @@
                                                  Value x, Value y) {
   // Squared-difference is (x-y)*(x-y).
   // This lowering calculates the difference and multiplies.
-  auto result_type = result.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type = result.getType().dyn_cast<RankedTensorType>();
   if (!result_type) {
     op->emitOpError("SquaredDifference: result not ranked tensor type");
     return llvm::None;
   }
 
-  auto x_type = x.getType().dyn_cast<RankedTensorType>();
-  auto y_type = y.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType x_type = x.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType y_type = y.getType().dyn_cast<RankedTensorType>();
   if (!x_type || !y_type) {
     op->emitOpError("SquaredDifference: inputs not ranked tensor type");
     return llvm::None;
@@ -516,13 +520,13 @@
 llvm::Optional<Value> convertRoundOp(PatternRewriter& rewriter, Operation* op,
                                      Value result, Value input) {
   // Implements banker's rounding by calculating floor(input + 0.5).
-  auto result_type = result.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type = result.getType().dyn_cast<RankedTensorType>();
   if (!result_type) {
     op->emitOpError("Round: result not ranked tensor type");
     return llvm::None;
   }
 
-  auto input_type = input.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type = input.getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("Round: input not ranked tensor type");
     return llvm::None;
@@ -545,7 +549,8 @@
   // ConcatV2 becomes a series of TOSA Concat operators that take pairs of
   // tensors as arguments.   Rank-0 tensors are reshaped to Rank-1,
   // shape (1,) tensors.
-  auto result_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   if (!result_type) {
     op->emitOpError("ConcatV2Op: result type not ranked tensor.");
     return llvm::None;
@@ -554,7 +559,8 @@
   // Valid axis in TF is [-rank(input), rank(input)).
   // Valid axis in TOSA is [0, rank(input)).
   // Plus rank(input) once if axis is negative.
-  auto input_type = op->getOperand(0).getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      op->getOperand(0).getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("ConcatV2Op: input type not ranked tensor.");
     return llvm::None;
@@ -574,8 +580,8 @@
 
   Value lhs_val = values[0];
   Value rhs_val = values[1];
-  auto lhs_type = lhs_val.getType().cast<RankedTensorType>();
-  auto rhs_type = rhs_val.getType().cast<RankedTensorType>();
+  RankedTensorType lhs_type = lhs_val.getType().cast<RankedTensorType>();
+  RankedTensorType rhs_type = rhs_val.getType().cast<RankedTensorType>();
   ArrayRef<int64_t> lhs_tensor_shape = lhs_type.getShape();
   ArrayRef<int64_t> rhs_tensor_shape = rhs_type.getShape();
   int input_tensor_rank = lhs_tensor_shape.size();
@@ -594,7 +600,7 @@
       return llvm::None;
     }
     SmallVector<int64_t, 8> reshape_rank1_size1_shape{1};
-    auto reshape_rank1_size1_type =
+    RankedTensorType reshape_rank1_size1_type =
         RankedTensorType::get(ArrayRef<int64_t>(reshape_rank1_size1_shape),
                               result_type.getElementType());
     ArrayAttr shape_rank1_size1_attr =
@@ -617,7 +623,7 @@
     concat_result_shape[axis] = lhs_tensor_shape[axis] + rhs_tensor_shape[axis];
   }
 
-  auto concat_type = RankedTensorType::get(
+  RankedTensorType concat_type = RankedTensorType::get(
       ArrayRef<int64_t>(concat_result_shape), result_type.getElementType());
 
   mlir::quant::UniformQuantizedType lhs_quant_type =
@@ -649,22 +655,22 @@
 
     // Rescale input if scale is not equal to output tensor scale.
     if (lhs_scale != result_scale) {
-      auto rescale_type =
+      RankedTensorType rescale_type =
           RankedTensorType::get(lhs_type.getShape(), result_quant_type);
 
-      auto rescale_op = buildRescale(rewriter, op, rescale_type, lhs_val,
-                                     lhs_scale / result_scale, lhs_zeropoint,
-                                     result_zeropoint);
+      Value rescale_op = buildRescale(rewriter, op, rescale_type, lhs_val,
+                                      lhs_scale / result_scale, lhs_zeropoint,
+                                      result_zeropoint);
 
       lhs_val = rescale_op;
     }
     if (rhs_scale != result_scale) {
-      auto rescale_type =
+      RankedTensorType rescale_type =
           RankedTensorType::get(rhs_type.getShape(), result_quant_type);
 
-      auto rescale_op = buildRescale(rewriter, op, rescale_type, rhs_val,
-                                     rhs_scale / result_scale, rhs_zeropoint,
-                                     result_zeropoint);
+      Value rescale_op = buildRescale(rewriter, op, rescale_type, rhs_val,
+                                      rhs_scale / result_scale, rhs_zeropoint,
+                                      result_zeropoint);
 
       rhs_val = rescale_op;
     }
@@ -694,12 +700,12 @@
       rhs_zeropoint = rhs_quant_type.getZeroPoint();
 
       if (rhs_scale != result_scale) {
-        auto rescale_type =
+        RankedTensorType rescale_type =
             RankedTensorType::get(rhs_type.getShape(), result_quant_type);
 
-        auto rescale_op = buildRescale(rewriter, op, rescale_type, rhs_val,
-                                       rhs_scale / result_scale, rhs_zeropoint,
-                                       result_zeropoint);
+        Value rescale_op = buildRescale(rewriter, op, rescale_type, rhs_val,
+                                        rhs_scale / result_scale, rhs_zeropoint,
+                                        result_zeropoint);
 
         rhs_val = rescale_op;
       }
@@ -772,11 +778,14 @@
   //  shape=a3_shape)
   //
 
-  auto result_type = result_value.getType().dyn_cast<RankedTensorType>();
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
-  auto block_shape_type =
+  RankedTensorType result_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType block_shape_type =
       block_shape_value.getType().dyn_cast<RankedTensorType>();
-  auto paddings_type = paddings_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType paddings_type =
+      paddings_value.getType().dyn_cast<RankedTensorType>();
 
   // Not a ranked tensor output.
   if (!result_type) {
@@ -862,7 +871,7 @@
     padded_shape[i + block_rank + 1] = input_shape[i + block_rank + 1];
   }
 
-  auto a0_pad_const_attr_type =
+  RankedTensorType a0_pad_const_attr_type =
       RankedTensorType::get({(input_rank), 2}, rewriter.getIntegerType(32));
 
   // Create a const op to generate the tensor type for the input padding array
@@ -935,7 +944,7 @@
     a3_transpose_shape[i] = a2_shape[a3_perm[i]];
   }
 
-  auto a3_transpose_const =
+  Value a3_transpose_const =
       get1DConstTensor<tosa::ConstOp, int32_t>(rewriter, op, a3_perm);
 
   auto a3_transpose_a2_op = rewriter.create<tosa::TransposeOp>(
@@ -1030,11 +1039,14 @@
   // a4_slice_a3 = tosa.slice(input=a3_reshape_a2, start=a4_start,
   // size=a4_size)
 
-  auto result_type = result_value.getType().dyn_cast<RankedTensorType>();
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
-  auto block_shape_type =
+  RankedTensorType result_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType block_shape_type =
       block_shape_value.getType().dyn_cast<RankedTensorType>();
-  auto crops_type = crops_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType crops_type =
+      crops_value.getType().dyn_cast<RankedTensorType>();
 
   if (!result_type) {
     op->emitOpError("BatchToSpaceND: result type not ranked tensor");
@@ -1151,7 +1163,7 @@
     a2_transpose_shape[i] = a1_shape[a2_perm[i]];
   }
 
-  auto a2_transpose_perm =
+  Value a2_transpose_perm =
       get1DConstTensor<tosa::ConstOp, int32_t>(rewriter, op, a2_perm);
   auto a2_transpose_a1_op = rewriter.create<tosa::TransposeOp>(
       op->getLoc(),
@@ -1219,14 +1231,16 @@
                                           Operation* op, Value result_value,
                                           Value input_value, Value dim_value) {
   // Lowers to a reshape op with 1's inserted in the appropriate dimensions.
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) {
     op->emitOpError("ExpandDims: output type not ranked tensor");
     return llvm::None;
   }
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("ExpandDims: input type not ranked tensor");
     return llvm::None;
@@ -1270,14 +1284,16 @@
                                        SmallVector<int32_t, 8>& squeeze_dims) {
   // Lowers to a reshape op where dimensions in squeeze_dims with size=1
   // are removed.
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) {
     op->emitOpError("Squeeze: output type not ranked tensor");
     return llvm::None;
   }
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("Squeeze: input type not ranked tensor");
     return llvm::None;
@@ -1335,7 +1351,8 @@
   // a2 = sub(a1, one_bcast)
   // a3 = ge(x, zero_bcast)
   // a4 = select(a3, x, a2)
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) {
     op->emitOpError("Elu: output type not ranked tensor");
@@ -1343,16 +1360,13 @@
   }
 
   int32_t input_rank = output_type.getShape().size();
-  SmallVector<int64_t, 4> bcast_shape;
-  for (int i = 0; i < input_rank; i++) {
-    bcast_shape.push_back(1);
-  }
+  SmallVector<int64_t, 4> bcast_shape(input_rank, 1);
 
   // Can't directly create size=1, rank=rank(input) tensor because
   // it will be optimized out.  Instead, create rank0 tensor and reshape later.
-  auto one_const_op = getTosaConstTensorSingleF32(rewriter, op, 1.0);
+  Value one_const_op = getTosaConstTensorSingleF32(rewriter, op, 1.0);
 
-  auto zero_const_op = getTosaConstTensorSingleF32(rewriter, op, 0.0);
+  Value zero_const_op = getTosaConstTensorSingleF32(rewriter, op, 0.0);
 
   auto a1_exp_in_op =
       rewriter.create<tosa::ExpOp>(op->getLoc(), output_type, features_value);
@@ -1386,8 +1400,10 @@
   // We'll use first version for direct fp lowering, and second version for
   // quantized lowering since second one we can restrict input to exp() be
   // negative, and thus LUT can always be within [0.0, 1.0].
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
-  auto input_type = logits_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      logits_value.getType().dyn_cast<RankedTensorType>();
 
   // Not a ranked tensor input/output
   if (!output_type || !input_type) {
@@ -1414,17 +1430,17 @@
     auto int16_element_qtype = mlir::quant::UniformQuantizedType::get(
         true, rewriter.getIntegerType(16), rewriter.getF32Type(), 1.0f, 0,
         -32768, 32767);
-    auto int16_logits_type =
+    RankedTensorType int16_logits_type =
         RankedTensorType::get(logits_shape, int16_element_qtype);
-    auto int32_logits_type =
+    RankedTensorType int32_logits_type =
         RankedTensorType::get(logits_shape, rewriter.getIntegerType(32));
-    auto int16_rsum_type =
+    RankedTensorType int16_rsum_type =
         RankedTensorType::get(rsum_shape, int16_element_qtype);
-    auto int32_rsum_type =
+    RankedTensorType int32_rsum_type =
         RankedTensorType::get(rsum_shape, rewriter.getIntegerType(32));
 
     // Step 1. get x - max(x)
-    auto op1_rescale_in =
+    Value op1_rescale_in =
         buildRescale(rewriter, op, int32_logits_type, logits_value, 1.0f,
                      in_quant_type.getZeroPoint(), 0);
 
@@ -1445,10 +1461,10 @@
       return std::lround(32768.0 * v);
     };
 
-    auto exp_table_const = getTosa1DConstTensorTable(rewriter, op, exp_func);
+    Value exp_table_const = getTosa1DConstTensorTable(rewriter, op, exp_func);
 
     // Step 2. rescale input
-    auto op4_rescale_op3 = buildRescale(
+    Value op4_rescale_op3 = buildRescale(
         rewriter, op, int16_logits_type, op3_sub_op1_op2.getResult(),
         in_quant_type.getScale() * 128.0 / exp_sample_grain, 0, 0);
 
@@ -1511,7 +1527,7 @@
       return std::lround(32768.0 * v);
     };
 
-    auto one_over_one_plus_x_table_const =
+    Value one_over_one_plus_x_table_const =
         getTosa1DConstTensorTable(rewriter, op, one_over_one_plus_x_func);
 
     auto op14_table_op13 = rewriter.create<tosa::TableOp>(
@@ -1519,11 +1535,11 @@
         one_over_one_plus_x_table_const);
 
     // Rescale sum(exp(x)) from 0.23 back to 0.16
-    auto op15_rescale_op14 = buildRescale(rewriter, op, int32_rsum_type,
-                                          op14_table_op13, 1.0 / 128.0, 0, 0);
+    Value op15_rescale_op14 = buildRescale(rewriter, op, int32_rsum_type,
+                                           op14_table_op13, 1.0 / 128.0, 0, 0);
 
     // Rescale exp(x) from 0.23 back to 0.16
-    auto op16_rescale_op5 =
+    Value op16_rescale_op5 =
         buildRescale(rewriter, op, int32_logits_type, op5_table_op4.getResult(),
                      1.0 / 128.0, 0, 0);
 
@@ -1559,7 +1575,7 @@
     // op4 = mul(op1, op3)
     auto op1_exp_in =
         rewriter.create<tosa::ExpOp>(op->getLoc(), output_type, logits_value);
-    auto rsum_type =
+    RankedTensorType rsum_type =
         RankedTensorType::get(rsum_shape, output_type.getElementType());
 
     // Keep dims so we don't need to reshape later
@@ -1587,14 +1603,16 @@
   // op4 = mul(op1, op3)
   // op5 = log(op4)
 
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) {
     op->emitOpError("LogSoftmax: output type not ranked tensor.");
     return llvm::None;
   }
 
-  auto input_type = op->getOperand(0).getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      op->getOperand(0).getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("LogSoftmax: input type not ranked tensor.");
     return llvm::None;
@@ -1619,8 +1637,8 @@
   SmallVector<int64_t, 4> rsum_shape(output_type.getShape().begin(),
                                      output_type.getShape().end());
   rsum_shape[input_rank - 1] = 1;
-  auto rsum_type = RankedTensorType::get(ArrayRef<int64_t>(rsum_shape),
-                                         output_type.getElementType());
+  RankedTensorType rsum_type = RankedTensorType::get(
+      ArrayRef<int64_t>(rsum_shape), output_type.getElementType());
   // Keep dims so we don't need to reshape later
   auto op2_reducesum_op1 = rewriter.create<tosa::ReduceSumOp>(
       op->getLoc(), rsum_type, op1_exp_in.getResult(),
@@ -1651,7 +1669,8 @@
   // a4 = tf.reshape(a3, [orig_shape[0], orig_shape[1]//b, orig_shape[2]//b,
   // orig_shape[3]*b*b])
   // return a4
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
 
   // Not a ranked tensor output.
   if (!output_type) {
@@ -1659,7 +1678,8 @@
     return llvm::None;
   }
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("SpaceToDepth: input type not ranked tensor.");
     return llvm::None;
@@ -1697,13 +1717,13 @@
   a_reshape_dims.push_back(block_size[1]);
   a_reshape_dims.push_back(input_shape[3]);
 
-  auto a_reshape_output_type = RankedTensorType::get(
+  RankedTensorType a_reshape_output_type = RankedTensorType::get(
       ArrayRef<int64_t>(a_reshape_dims), output_type.getElementType());
   auto a2_reshape_a_op = rewriter.create<tosa::ReshapeOp>(
       op->getLoc(), a_reshape_output_type, input_value,
       rewriter.getI64ArrayAttr(a_reshape_dims));
 
-  auto a3_transpose_perm = get1DConstTensor<tosa::ConstOp, int32_t>(
+  Value a3_transpose_perm = get1DConstTensor<tosa::ConstOp, int32_t>(
       rewriter, op, {0, 1, 3, 2, 4, 5});
 
   auto a3_transpose_a2_op = rewriter.create<tosa::TransposeOp>(
@@ -1716,7 +1736,7 @@
   a3_reshape_dims.push_back(input_shape[2] / block_size[1]);
   a3_reshape_dims.push_back(input_shape[3] * block_size[0] * block_size[1]);
 
-  auto a3_reshape_output_type = RankedTensorType::get(
+  RankedTensorType a3_reshape_output_type = RankedTensorType::get(
       ArrayRef<int64_t>(a3_reshape_dims), output_type.getElementType());
   return rewriter
       .create<tosa::ReshapeOp>(op->getLoc(), a3_reshape_output_type,
@@ -1739,7 +1759,8 @@
   // orig_shape[3] // (b*b)])
   // return a4
 
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
 
   // Not a ranked tensor output
   if (!output_type) {
@@ -1747,7 +1768,8 @@
     return llvm::None;
   }
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("DepthToSpace: input type not ranked tensor.");
     return llvm::None;
@@ -1780,13 +1802,13 @@
   a_reshape_dims.push_back(block_size[1]);
   a_reshape_dims.push_back(input_shape[3] / (block_size[0] * block_size[1]));
 
-  auto a_reshape_output_type = RankedTensorType::get(
+  RankedTensorType a_reshape_output_type = RankedTensorType::get(
       ArrayRef<int64_t>(a_reshape_dims), output_type.getElementType());
   auto a2_reshape_a_op = rewriter.create<tosa::ReshapeOp>(
       op->getLoc(), a_reshape_output_type, input_value,
       rewriter.getI64ArrayAttr(a_reshape_dims));
 
-  auto a3_transpose_perm = get1DConstTensor<tosa::ConstOp, int32_t>(
+  Value a3_transpose_perm = get1DConstTensor<tosa::ConstOp, int32_t>(
       rewriter, op, {0, 1, 3, 2, 4, 5});
 
   auto a3_transpose_a2_op = rewriter.create<tosa::TransposeOp>(
@@ -1799,7 +1821,7 @@
   a3_reshape_dims.push_back(input_shape[2] * block_size[1]);
   a3_reshape_dims.push_back(input_shape[3] / (block_size[0] * block_size[1]));
 
-  auto a3_reshape_output_type = RankedTensorType::get(
+  RankedTensorType a3_reshape_output_type = RankedTensorType::get(
       ArrayRef<int64_t>(a3_reshape_dims), output_type.getElementType());
   return rewriter
       .create<tosa::ReshapeOp>(op->getLoc(), a3_reshape_output_type,
@@ -1816,14 +1838,16 @@
   // This lowering creates num_split slice ops and ties them together
   // with IdentityN to get from an array of Operations to a single Operation
   // with a list of result tensors.
-  auto result_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!result_type) {
     op->emitOpError("Split: output type not ranked tensor.");
     return llvm::None;
   }
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("Split: input type not ranked tensor.");
     return llvm::None;
@@ -1889,14 +1913,16 @@
   // This lowering creates num_split slice ops and ties them together
   // with IdentityN to get from an array of Operations to a single Operation
   // with a list of result tensors.
-  auto result_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!result_type) {
     op->emitOpError("SplitV: output type not ranked tensor.");
     return llvm::None;
   }
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     op->emitOpError("SplitV: input type not ranked tensor.");
     return llvm::None;
@@ -1996,8 +2022,10 @@
   // to insert tosa.Reverse operators for this.
   assert(ellipsis_mask == 0);
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
-  auto result_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType result_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
 
   if (!result_type) {
     op->emitOpError("StridedSlice: output type not ranked tensor.");
@@ -2117,7 +2145,8 @@
   // a2 = mul(lhs, a1);
   // a3 = floor(a2);
   // return a3;
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return llvm::None;
 
@@ -2144,7 +2173,8 @@
   // a4 = sub(a2, a3);
   // return a4;
 
-  auto output_type = result_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
+      result_value.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return llvm::None;
 
@@ -2166,7 +2196,8 @@
 llvm::Optional<Value> convertFusedActivation(PatternRewriter& rewriter,
                                              Operation* op, Value input_value,
                                              StringAttr fused_activation_fn) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   bool input_is_qtype =
@@ -2181,11 +2212,11 @@
       op->emitWarning("Quantized TANH lowering TBD!");
       return llvm::None;
     } else {
-      auto rescale_type = RankedTensorType::get(input_type.getShape(),
-                                                rewriter.getIntegerType(32));
+      RankedTensorType rescale_type = RankedTensorType::get(
+          input_type.getShape(), rewriter.getIntegerType(32));
 
-      auto op1_rescale_in = buildRescaleToInt32(rewriter, op, input_value, 1.0f,
-                                                input_qtype.getZeroPoint());
+      Value op1_rescale_in = buildRescaleToInt32(
+          rewriter, op, input_value, 1.0f, input_qtype.getZeroPoint());
 
       Value op2_relu_op1;
       if (fused_activation_fn.getValue() == "NONE") {
@@ -2272,7 +2303,8 @@
     Value input_value, ElementsAttr axes_elems, bool keep_dims,
     Type reduce_element_type, bool is_quantized, double input_scale,
     int64_t input_zp, double output_scale, int64_t output_zp) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   ArrayRef<int64_t> input_shape = input_type.getShape();
@@ -2299,7 +2331,7 @@
       auto axis_attr = rewriter.getI64IntegerAttr(axis_val);
 
       shape_vec[axis_val] = 1;
-      auto reduce_type = RankedTensorType::get(
+      RankedTensorType reduce_type = RankedTensorType::get(
           llvm::makeArrayRef<int64_t>(shape_vec), reduce_element_type);
 
       auto reduce_op =
@@ -2309,7 +2341,7 @@
     }
 
     if (is_quantized) {
-      auto output_rescale_type = RankedTensorType::get(
+      RankedTensorType output_rescale_type = RankedTensorType::get(
           llvm::makeArrayRef<int64_t>(shape_vec), output_type.getElementType());
       val = buildRescaleFromInt32(rewriter, op, output_rescale_type, val,
                                   output_scale, output_zp);
@@ -2331,7 +2363,8 @@
 llvm::Optional<Value> convertReduceAllOp(
     PatternRewriter& rewriter, Operation* op, RankedTensorType output_type,
     Value input_value, ElementsAttr axes_elems, bool keep_dims) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   return convertReduceOpCommon<tosa::ReduceAllOp>(
@@ -2343,7 +2376,8 @@
 llvm::Optional<Value> convertReduceAnyOp(
     PatternRewriter& rewriter, Operation* op, RankedTensorType output_type,
     Value input_value, ElementsAttr axes_elems, bool keep_dims) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   return convertReduceOpCommon<tosa::ReduceAnyOp>(
@@ -2355,7 +2389,8 @@
 llvm::Optional<Value> convertReduceMinOp(
     PatternRewriter& rewriter, Operation* op, RankedTensorType output_type,
     Value input_value, ElementsAttr axes_elems, bool keep_dims) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   return convertReduceOpCommon<tosa::ReduceMinOp>(
@@ -2367,7 +2402,8 @@
 llvm::Optional<Value> convertReduceMaxOp(
     PatternRewriter& rewriter, Operation* op, RankedTensorType output_type,
     Value input_value, ElementsAttr axes_elems, bool keep_dims) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   return convertReduceOpCommon<tosa::ReduceMaxOp>(
@@ -2379,7 +2415,8 @@
 llvm::Optional<Value> convertReduceProdOp(
     PatternRewriter& rewriter, Operation* op, RankedTensorType output_type,
     Value input_value, ElementsAttr axes_elems, bool keep_dims) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   bool input_is_qtype =
@@ -2403,7 +2440,8 @@
 llvm::Optional<Value> convertReduceSumOp(
     PatternRewriter& rewriter, Operation* op, RankedTensorType output_type,
     Value input_value, ElementsAttr axes_elems, bool keep_dims) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   bool input_is_qtype =
@@ -2456,7 +2494,8 @@
   // op1 = reduce_sum(input)
   // op2 = mul(op1, 1.0 / num_elements_on_reduced_axis)
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   bool input_is_qtype =
@@ -2534,7 +2573,8 @@
 llvm::Optional<Value> convertResizeOp(PatternRewriter& rewriter, Operation* op,
                                       RankedTensorType output_type,
                                       Value input_value, StringRef mode) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   auto input_shape = input_type.getShape();
@@ -2600,7 +2640,8 @@
                                         RankedTensorType output_type,
                                         Value input_value, double scale,
                                         int64_t zeropoint) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   auto output_shape = output_type.getShape();
@@ -2613,7 +2654,7 @@
     return llvm::None;
   }
 
-  auto output_fp_type =
+  RankedTensorType output_fp_type =
       RankedTensorType::get(output_shape, rewriter.getF32Type());
 
   Value zp_val =
@@ -2644,7 +2685,8 @@
                                           RankedTensorType output_type,
                                           Value input_value, double scale,
                                           int64_t zeropoint) {
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   // input element type could only be quantized integer
@@ -2661,7 +2703,7 @@
 
   // TOSA doesn't support CAST AINT8 -> FLOAT, need to RESCALE to INT32
   // followed by a CAST
-  auto op1_rescale_in =
+  Value op1_rescale_in =
       buildRescale(rewriter, op, output_int32_type, input_value, 1.0, 0, 0);
 
   auto op2_cast_op1 =
@@ -2689,7 +2731,8 @@
   // op1 = quantize(input)
   // op2 = dequantize(op1)
 
-  auto input_type = input_value.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      input_value.getType().dyn_cast<RankedTensorType>();
   if (!input_type) return llvm::None;
 
   // quantized as INT<num_bits>, where num_bits can only be 8, 16
@@ -2709,7 +2752,8 @@
   auto int_element_qtype = mlir::quant::UniformQuantizedType::get(
       true, rewriter.getIntegerType(num_bits), rewriter.getF32Type(), 1.0f, 0,
       qmin, qmax);
-  auto output_int_type = RankedTensorType::get(output_shape, int_element_qtype);
+  RankedTensorType output_int_type =
+      RankedTensorType::get(output_shape, int_element_qtype);
 
   double scale = (max - min) / static_cast<double>(qmax - qmin);
   int64_t zeropoint = std::llround((-min) / scale + static_cast<double>(qmin));
@@ -2730,8 +2774,8 @@
     Value input, Value filter, Value bias, ArrayAttr strides_attr,
     ArrayAttr dilations_attr, ArrayAttr explicit_padding_attr,
     StringRef padding_ref, StringRef data_format_ref) {
-  auto input_type = input.getType().dyn_cast<RankedTensorType>();
-  auto filter_type = filter.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type = input.getType().dyn_cast<RankedTensorType>();
+  RankedTensorType filter_type = filter.getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type) return llvm::None;
   if (!filter_type) return llvm::None;
@@ -2743,7 +2787,7 @@
   a1_transpose_dims.push_back(filter_shape[0]);
   a1_transpose_dims.push_back(filter_shape[1]);
   a1_transpose_dims.push_back(filter_shape[2]);
-  auto a1_filter_transpose_perm =
+  Value a1_filter_transpose_perm =
       get1DConstTensor<tosa::ConstOp, int32_t>(rewriter, op, {3, 0, 1, 2});
   auto a1_filter_transpose_op = rewriter.create<tosa::TransposeOp>(
       op->getLoc(),
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_common.h b/tensorflow/compiler/mlir/tosa/transforms/legalize_common.h
index 8459f46..c31aebe 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_common.h
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_common.h
@@ -4,7 +4,7 @@
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-   http://www.apache.org/licenses/LICENSE-2.0
+   http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,6 +25,12 @@
 //
 // For these functions, the framework-specific operands/attributes/defaults
 // are already extracted and placed in a common form for lowering.
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/FormatVariadic.h"
 #include "mlir/Dialect/Quant/FakeQuantSupport.h"
 #include "mlir/Dialect/Quant/UniformSupport.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
@@ -43,12 +49,6 @@
 #include "mlir/Pass/Pass.h"
 #include "mlir/Support/LLVM.h"
 #include "mlir/Transforms/DialectConversion.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/FormatVariadic.h"
 
 namespace mlir {
 namespace tosa {
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_tf.cc b/tensorflow/compiler/mlir/tosa/transforms/legalize_tf.cc
index d1140e3..20f1087 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_tf.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_tf.cc
@@ -159,7 +159,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_relu_op = cast<TF::ReluOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_relu_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -181,7 +181,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_relu6_op = cast<TF::Relu6Op>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_relu6_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -202,7 +202,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_equal_op = cast<TF::EqualOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_equal_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -216,7 +216,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_not_equal_op = cast<TF::NotEqualOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_not_equal_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -236,7 +236,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_greater_op = cast<TF::GreaterOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_greater_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -250,7 +250,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_greater_equal_op = cast<TF::GreaterEqualOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_greater_equal_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -264,7 +264,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_add_op = cast<TF::AddOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_add_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -278,7 +278,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_addv2_op = cast<TF::AddV2Op>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_addv2_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -293,7 +293,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_addn_op = cast<TF::AddNOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_addn_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -318,7 +318,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_sub_op = cast<TF::SubOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_sub_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -332,8 +332,8 @@
                                               PatternRewriter& rewriter) const {
   auto tf_mul_op = cast<TF::MulOp>(op);
 
-  auto result = convertMultiplyOp(rewriter, op, tf_mul_op.getResult(),
-                                  tf_mul_op.x(), tf_mul_op.y());
+  llvm::Optional<Value> result = convertMultiplyOp(
+      rewriter, op, tf_mul_op.getResult(), tf_mul_op.x(), tf_mul_op.y());
 
   if (!result) return failure();
 
@@ -345,8 +345,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_square_op = cast<TF::SquareOp>(op);
 
-  auto result = convertMultiplyOp(rewriter, op, tf_square_op.getResult(),
-                                  tf_square_op.x(), tf_square_op.x());
+  llvm::Optional<Value> result =
+      convertMultiplyOp(rewriter, op, tf_square_op.getResult(),
+                        tf_square_op.x(), tf_square_op.x());
 
   if (!result) return failure();
 
@@ -358,7 +359,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_squared_op = cast<TF::SquaredDifferenceOp>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertSquaredDifferenceOp(rewriter, op, tf_squared_op.getResult(),
                                  tf_squared_op.x(), tf_squared_op.y());
 
@@ -372,13 +373,14 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_round_op = cast<TF::RoundOp>(op);
 
-  auto input_type = tf_round_op.x().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tf_round_op.x().getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     return op->emitOpError("Round: input not ranked tensor type");
   }
 
   if (input_type.getElementType().isa<FloatType>()) {
-    auto result =
+    llvm::Optional<Value> result =
         convertRoundOp(rewriter, op, tf_round_op.getResult(), tf_round_op.x());
 
     if (!result) return failure();
@@ -396,8 +398,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_floordiv_op = cast<TF::FloorDivOp>(op);
 
-  auto result = convertFloorDivOp(rewriter, op, tf_floordiv_op.getResult(),
-                                  tf_floordiv_op.x(), tf_floordiv_op.y());
+  llvm::Optional<Value> result =
+      convertFloorDivOp(rewriter, op, tf_floordiv_op.getResult(),
+                        tf_floordiv_op.x(), tf_floordiv_op.y());
 
   if (!result) return failure();
 
@@ -410,8 +413,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_floormod_op = cast<TF::FloorModOp>(op);
 
-  auto result = convertFloorModOp(rewriter, op, tf_floormod_op.getResult(),
-                                  tf_floormod_op.x(), tf_floormod_op.y());
+  llvm::Optional<Value> result =
+      convertFloorModOp(rewriter, op, tf_floormod_op.getResult(),
+                        tf_floormod_op.x(), tf_floormod_op.y());
 
   if (!result) return failure();
 
@@ -431,7 +435,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_maximum_op = cast<TF::MaximumOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_maximum_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -445,7 +449,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_minimum_op = cast<TF::MinimumOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_minimum_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -459,8 +463,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_div_op = cast<TF::RealDivOp>(op);
 
-  auto y_type = tf_div_op.y().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType y_type =
+      tf_div_op.y().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tf_div_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type || !y_type) return failure();
@@ -479,8 +484,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_argmax_op = cast<TF::ArgMaxOp>(op);
 
-  auto input_type = tf_argmax_op.input().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType input_type =
+      tf_argmax_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tf_argmax_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type || !input_type) return failure();
@@ -509,9 +515,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_avgpool_op = cast<TF::AvgPoolOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tf_avgpool_op.value().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tf_avgpool_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type || !output_type) return failure();
@@ -559,7 +565,7 @@
       i64array.emplace_back(value);
     }
 
-    auto filter_type = RankedTensorType::get(
+    RankedTensorType filter_type = RankedTensorType::get(
         llvm::makeArrayRef<int64_t>(i64array), rewriter.getIntegerType(64));
 
     if (!getPaddingValuesFromPadType(
@@ -579,9 +585,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_maxpool_op = cast<TF::MaxPoolOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tf_maxpool_op.input().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tf_maxpool_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type || !output_type) return failure();
@@ -629,7 +635,7 @@
       i64array.emplace_back(value);
     }
 
-    auto filter_type = RankedTensorType::get(
+    RankedTensorType filter_type = RankedTensorType::get(
         llvm::makeArrayRef<int64_t>(i64array), rewriter.getIntegerType(64));
 
     if (!getPaddingValuesFromPadType(
@@ -656,7 +662,7 @@
 
   int32_t axis = axis_elems.getValue<IntegerAttr>({}).getInt();
 
-  auto result =
+  llvm::Optional<Value> result =
       convertConcatV2Op(rewriter, op, tf_concatv2_op.getResult(), values, axis);
 
   if (!result) return failure();
@@ -670,7 +676,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_reshape_op = cast<TF::ReshapeOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_reshape_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -692,12 +698,14 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_rank_op = cast<TF::RankOp>(op);
 
-  auto input_type = tf_rank_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tf_rank_op.input().getType().dyn_cast<RankedTensorType>();
   if (!input_type) return failure();
 
   int32_t rank = input_type.getRank();
 
-  auto rank_type = RankedTensorType::get({1}, rewriter.getIntegerType(32));
+  RankedTensorType rank_type =
+      RankedTensorType::get({1}, rewriter.getIntegerType(32));
   auto rank_attr = DenseElementsAttr::get(rank_type, {rank});
   auto rank_const =
       rewriter.create<tosa::ConstOp>(op->getLoc(), rank_type, rank_attr);
@@ -711,12 +719,13 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_shape_op = cast<TF::ShapeOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_shape_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto input_type = tf_shape_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tf_shape_op.input().getType().dyn_cast<RankedTensorType>();
   if (!input_type) return failure();
 
   auto input_shape = input_type.getShape();
@@ -726,7 +735,7 @@
     shape_arr.emplace_back(input_shape[i]);
   }
 
-  auto shape_type = RankedTensorType::get(
+  RankedTensorType shape_type = RankedTensorType::get(
       {static_cast<int32_t>(shape_arr.size())}, rewriter.getIntegerType(32));
   auto shape_attr = DenseElementsAttr::get(
       shape_type, llvm::makeArrayRef<int32_t>(shape_arr));
@@ -742,7 +751,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_expanddims_op = cast<TF::ExpandDimsOp>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertExpandDimsOp(rewriter, op, tf_expanddims_op.getResult(),
                           tf_expanddims_op.input(), tf_expanddims_op.dim());
 
@@ -764,8 +773,9 @@
     squeeze_dims.emplace_back(squeeze_dim.dyn_cast<IntegerAttr>().getInt());
   }
 
-  auto result = convertSqueezeOp(rewriter, op, tf_squeeze_op.getResult(),
-                                 tf_squeeze_op.input(), squeeze_dims);
+  llvm::Optional<Value> result =
+      convertSqueezeOp(rewriter, op, tf_squeeze_op.getResult(),
+                       tf_squeeze_op.input(), squeeze_dims);
 
   if (!result) return failure();
 
@@ -778,7 +788,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_fill_op = cast<TF::FillOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_fill_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -797,8 +807,8 @@
   if (!matchPattern(tf_fill_op.value(), m_Constant(&value_elem)))
     return failure();
 
-  auto fill_type = RankedTensorType::get(ArrayRef<int64_t>(dims_vals),
-                                         value_elem.getType().getElementType());
+  RankedTensorType fill_type = RankedTensorType::get(
+      ArrayRef<int64_t>(dims_vals), value_elem.getType().getElementType());
   DenseElementsAttr fill_attr;
 
   // Convert to a compatible zero type
@@ -826,20 +836,20 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_conv2d_op = cast<TF::Conv2DOp>(op);
 
-  auto filter_type =
+  RankedTensorType filter_type =
       tf_conv2d_op.filter().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tf_conv2d_op.getResult().getType().dyn_cast<RankedTensorType>();
 
   // Set up a zero attr for subsequent pattern replacement if required
   auto bias_dim = filter_type.getShape().back();
-  auto bias_type =
+  RankedTensorType bias_type =
       RankedTensorType::get({bias_dim}, filter_type.getElementType());
   auto bias_attr = rewriter.getZeroAttr(bias_type);
   auto bias = rewriter.create<tosa::ConstOp>(op->getLoc(), bias_type,
                                              bias_attr.cast<ElementsAttr>());
 
-  auto result = convertTFConv2DCommon(
+  llvm::Optional<Value> result = convertTFConv2DCommon(
       rewriter, op, output_type, tf_conv2d_op.input(), tf_conv2d_op.filter(),
       bias, tf_conv2d_op.strides(), tf_conv2d_op.dilations(),
       tf_conv2d_op.explicit_paddings(), tf_conv2d_op.padding(),
@@ -856,11 +866,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_dwconv2d_op = cast<TF::DepthwiseConv2dNativeOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tf_dwconv2d_op.input().getType().dyn_cast<RankedTensorType>();
-  auto filter_type =
+  RankedTensorType filter_type =
       tf_dwconv2d_op.filter().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tf_dwconv2d_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type) return failure();
@@ -922,7 +932,7 @@
 
   auto filter_shape = filter_type.getShape();
   auto bias_dim = filter_shape[2] * filter_shape[3];
-  auto bias_type =
+  RankedTensorType bias_type =
       RankedTensorType::get({bias_dim}, filter_type.getElementType());
   auto bias_attr = rewriter.getZeroAttr(bias_type);
   auto bias = rewriter.create<tosa::ConstOp>(op->getLoc(), bias_type,
@@ -938,10 +948,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_conv_op = cast<TF::Conv2DBackpropInputOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tf_conv_op.out_backprop().getType().dyn_cast<RankedTensorType>();
-  auto filter_type = tf_conv_op.filter().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType filter_type =
+      tf_conv_op.filter().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tf_conv_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type) return failure();
@@ -955,7 +966,7 @@
   a1_transpose_dims.push_back(filter_shape[0]);
   a1_transpose_dims.push_back(filter_shape[1]);
   a1_transpose_dims.push_back(filter_shape[3]);
-  auto a1_filter_transpose_perm =
+  Value a1_filter_transpose_perm =
       get1DConstTensor<tosa::ConstOp, int32_t>(rewriter, op, {2, 0, 1, 3});
   auto a1_filter_transpose_op = rewriter.create<tosa::TransposeOp>(
       op->getLoc(),
@@ -1041,7 +1052,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_all_op = cast<TF::AllOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_all_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1053,8 +1064,8 @@
   auto keep_dims_attr = tf_all_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceAllOp(rewriter, op, output_type, tf_all_op.input(),
-                                   axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceAllOp(
+      rewriter, op, output_type, tf_all_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1067,7 +1078,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_any_op = cast<TF::AnyOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_any_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1079,8 +1090,8 @@
   auto keep_dims_attr = tf_any_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceAnyOp(rewriter, op, output_type, tf_any_op.input(),
-                                   axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceAnyOp(
+      rewriter, op, output_type, tf_any_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1093,7 +1104,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_max_op = cast<TF::MaxOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_max_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1105,8 +1116,8 @@
   auto keep_dims_attr = tf_max_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceMaxOp(rewriter, op, output_type, tf_max_op.input(),
-                                   axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceMaxOp(
+      rewriter, op, output_type, tf_max_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1119,7 +1130,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_min_op = cast<TF::MinOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_min_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1131,8 +1142,8 @@
   auto keep_dims_attr = tf_min_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceMinOp(rewriter, op, output_type, tf_min_op.input(),
-                                   axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceMinOp(
+      rewriter, op, output_type, tf_min_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1145,7 +1156,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_mean_op = cast<TF::MeanOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_mean_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1157,8 +1168,8 @@
   auto keep_dims_attr = tf_mean_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceMeanOp(rewriter, op, output_type,
-                                    tf_mean_op.input(), axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceMeanOp(
+      rewriter, op, output_type, tf_mean_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1171,7 +1182,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_prod_op = cast<TF::ProdOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_prod_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1183,8 +1194,8 @@
   auto keep_dims_attr = tf_prod_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceProdOp(rewriter, op, output_type,
-                                    tf_prod_op.input(), axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceProdOp(
+      rewriter, op, output_type, tf_prod_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1197,7 +1208,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_sum_op = cast<TF::SumOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_sum_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1209,8 +1220,8 @@
   auto keep_dims_attr = tf_sum_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceSumOp(rewriter, op, output_type, tf_sum_op.input(),
-                                   axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceSumOp(
+      rewriter, op, output_type, tf_sum_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1223,7 +1234,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_elu_op = cast<TF::EluOp>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertEluOp(rewriter, op, tf_elu_op.getResult(), tf_elu_op.features());
 
   if (!result) return failure();
@@ -1237,8 +1248,8 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_softmax_op = cast<TF::SoftmaxOp>(op);
 
-  auto result = convertSoftmaxOp(rewriter, op, tf_softmax_op.getResult(),
-                                 tf_softmax_op.logits());
+  llvm::Optional<Value> result = convertSoftmaxOp(
+      rewriter, op, tf_softmax_op.getResult(), tf_softmax_op.logits());
 
   if (!result) return failure();
 
@@ -1251,8 +1262,8 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_logsoftmax_op = cast<TF::LogSoftmaxOp>(op);
 
-  auto result = convertLogSoftmaxOp(rewriter, op, tf_logsoftmax_op.getResult(),
-                                    tf_logsoftmax_op.logits());
+  llvm::Optional<Value> result = convertLogSoftmaxOp(
+      rewriter, op, tf_logsoftmax_op.getResult(), tf_logsoftmax_op.logits());
 
   if (!result) return failure();
 
@@ -1265,7 +1276,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_batchnorm_op = cast<TF::FusedBatchNormOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_batchnorm_op.getResult(0).getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1290,9 +1301,9 @@
   // op5 = mul(op4, bscale)
   // op6 = add(op5, boffset)
 
-  auto mean_type =
+  RankedTensorType mean_type =
       tf_batchnorm_op.mean().getType().dyn_cast<RankedTensorType>();
-  auto variance_type =
+  RankedTensorType variance_type =
       tf_batchnorm_op.variance().getType().dyn_cast<RankedTensorType>();
   if (!variance_type || !mean_type) return failure();
 
@@ -1310,7 +1321,7 @@
     variance_val = tf_batchnorm_op.variance();
   }
 
-  auto epsilon_type =
+  RankedTensorType epsilon_type =
       RankedTensorType::get({1}, variance_type.getElementType());
   auto epsilon_attr =
       DenseFPElementsAttr::get(epsilon_type, {tf_batchnorm_op.epsilon()});
@@ -1348,7 +1359,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_batchnorm_op = cast<TF::FusedBatchNormV3Op>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_batchnorm_op.getResult(0).getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1366,7 +1377,7 @@
       op->getLoc(), tf_batchnorm_op.getResult(0).getType(), tf_batchnorm_op.x(),
       tf_batchnorm_op.mean());
 
-  auto variance_type =
+  RankedTensorType variance_type =
       tf_batchnorm_op.variance().getType().dyn_cast<RankedTensorType>();
   if (!variance_type) return failure();
 
@@ -1405,7 +1416,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_biasadd_op = cast<TF::BiasAddOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_biasadd_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1421,7 +1432,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_slice_op = cast<TF::SliceOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_slice_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1460,7 +1471,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_tile_op = cast<TF::TileOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_tile_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1484,7 +1495,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_transpose_op = cast<TF::TransposeOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_transpose_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) {
@@ -1513,7 +1524,7 @@
   }
   int32_t axis_i32 = axis_attr.getInt();
 
-  auto result =
+  llvm::Optional<Value> result =
       convertPackOp(rewriter, op, tf_pack_op.getResult(), inputs, axis_i32);
 
   if (!result) return failure();
@@ -1535,7 +1546,8 @@
   }
   int32_t axis_i32 = axis_attr.getInt();
 
-  auto results = convertUnpackOp(rewriter, op, tf_unpack_op.value(), axis_i32);
+  llvm::Optional<ValueRange> results =
+      convertUnpackOp(rewriter, op, tf_unpack_op.value(), axis_i32);
 
   if (!results) return failure();
 
@@ -1562,8 +1574,9 @@
     axis = axisAttrElems.getValue<IntegerAttr>({}).getInt();
   }
 
-  auto results = convertSplitOp(rewriter, op, tf_split_op.getResult(0),
-                                tf_split_op.value(), num_split, axis);
+  llvm::Optional<ValueRange> results =
+      convertSplitOp(rewriter, op, tf_split_op.getResult(0),
+                     tf_split_op.value(), num_split, axis);
 
   if (!results) return failure();
 
@@ -1597,8 +1610,9 @@
 
   int32_t axis = axisAttrElems.getValue<IntegerAttr>(0).getInt();
 
-  auto results = convertSplitVOp(rewriter, op, tf_splitv_op.getResult(0),
-                                 tf_splitv_op.value(), size_split, axis);
+  llvm::Optional<ValueRange> results =
+      convertSplitVOp(rewriter, op, tf_splitv_op.getResult(0),
+                      tf_splitv_op.value(), size_split, axis);
 
   if (!results) return failure();
 
@@ -1611,7 +1625,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_less_op = cast<TF::LessOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_less_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1631,7 +1645,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_less_equal_op = cast<TF::LessEqualOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_less_equal_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1651,7 +1665,7 @@
                                               PatternRewriter& rewriter) const {
   auto tf_pad_op = cast<TF::PadOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_pad_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1667,13 +1681,13 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_resize_op = cast<TF::ResizeBilinearOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_resize_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto result = convertResizeOp(rewriter, op, output_type,
-                                tf_resize_op.images(), StringRef("BILINEAR"));
+  llvm::Optional<Value> result = convertResizeOp(
+      rewriter, op, output_type, tf_resize_op.images(), StringRef("BILINEAR"));
 
   if (!result) return failure();
 
@@ -1686,13 +1700,13 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_resize_op = cast<TF::ResizeNearestNeighborOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_resize_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto result = convertResizeOp(rewriter, op, output_type,
-                                tf_resize_op.images(), StringRef("NEAREST"));
+  llvm::Optional<Value> result = convertResizeOp(
+      rewriter, op, output_type, tf_resize_op.images(), StringRef("NEAREST"));
 
   if (!result) return failure();
 
@@ -1705,9 +1719,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_matmul_op = cast<TF::MatMulOp>(op);
 
-  auto a_type = tf_matmul_op.a().getType().dyn_cast<RankedTensorType>();
-  auto b_type = tf_matmul_op.b().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType a_type =
+      tf_matmul_op.a().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType b_type =
+      tf_matmul_op.b().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tf_matmul_op.getResult().getType().dyn_cast<RankedTensorType>();
 
   if (!(a_type && b_type && output_type)) {
@@ -1727,7 +1743,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_gather_op = cast<TF::GatherOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_gather_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1746,7 +1762,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_gather_op = cast<TF::GatherV2Op>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_gather_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1776,7 +1792,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_sel_op = cast<TF::SelectV2Op>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertSelectOp(rewriter, op, tf_sel_op.getResult(),
                       tf_sel_op.condition(), tf_sel_op.t(), tf_sel_op.e());
 
@@ -1791,7 +1807,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_s2d_op = cast<TF::SpaceToDepthOp>(op);
 
-  auto result = convertSpaceToDepthOp(
+  llvm::Optional<Value> result = convertSpaceToDepthOp(
       rewriter, op, tf_s2d_op.getResult(), tf_s2d_op.input(),
       tf_s2d_op.block_sizeAttr(), tf_s2d_op.data_formatAttr());
 
@@ -1806,7 +1822,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_d2s_op = cast<TF::DepthToSpaceOp>(op);
 
-  auto result = convertDepthToSpaceOp(
+  llvm::Optional<Value> result = convertDepthToSpaceOp(
       rewriter, op, tf_d2s_op.getResult(), tf_d2s_op.input(),
       tf_d2s_op.block_sizeAttr(), tf_d2s_op.data_formatAttr());
 
@@ -1821,7 +1837,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_s2b_op = cast<TF::SpaceToBatchNDOp>(op);
 
-  auto result = convertSpaceToBatchNDOp(
+  llvm::Optional<Value> result = convertSpaceToBatchNDOp(
       rewriter, op, tf_s2b_op.getResult(), tf_s2b_op.input(),
       tf_s2b_op.block_shape(), tf_s2b_op.paddings());
   if (!result) return failure();
@@ -1835,7 +1851,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_b2s_op = cast<TF::BatchToSpaceNDOp>(op);
 
-  auto result = convertBatchToSpaceNDOp(
+  llvm::Optional<Value> result = convertBatchToSpaceNDOp(
       rewriter, op, tf_b2s_op.getResult(), tf_b2s_op.input(),
       tf_b2s_op.block_shape(), tf_b2s_op.crops());
 
@@ -1850,7 +1866,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_ss_op = cast<TF::StridedSliceOp>(op);
 
-  auto result = convertStridedSliceOp(
+  llvm::Optional<Value> result = convertStridedSliceOp(
       rewriter, op, tf_ss_op.getResult(), tf_ss_op.input(), tf_ss_op.begin(),
       tf_ss_op.end(), tf_ss_op.strides(), tf_ss_op.begin_maskAttr().getInt(),
       tf_ss_op.end_maskAttr().getInt(), tf_ss_op.ellipsis_maskAttr().getInt(),
@@ -1868,8 +1884,8 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_zeroslike_op = cast<TF::ZerosLikeOp>(op);
 
-  auto result = convertZerosLikeOp(rewriter, op, tf_zeroslike_op.getResult(),
-                                   tf_zeroslike_op.x());
+  llvm::Optional<Value> result = convertZerosLikeOp(
+      rewriter, op, tf_zeroslike_op.getResult(), tf_zeroslike_op.x());
 
   if (!result) return failure();
 
@@ -1881,7 +1897,7 @@
 LogicalResult ConvertTFSigmoidOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_sigmoid_op = cast<TF::SigmoidOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tf_sigmoid_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1894,7 +1910,7 @@
 LogicalResult ConvertTFTanhOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_tanh_op = cast<TF::TanhOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tf_tanh_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1906,7 +1922,7 @@
 LogicalResult ConvertTFLeakyReluOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_leakyrelu_op = cast<TF::LeakyReluOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tf_leakyrelu_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1918,7 +1934,7 @@
 LogicalResult ConvertTFNegOp::matchAndRewrite(Operation* op,
                                               PatternRewriter& rewriter) const {
   auto tf_neg_op = cast<TF::NegOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tf_neg_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1930,7 +1946,7 @@
 LogicalResult ConvertTFStopGradientOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_stopgrad_op = cast<TF::StopGradientOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tf_stopgrad_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1943,9 +1959,9 @@
 LogicalResult ConvertTFReverseV2Op::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_reverse_op = cast<TF::ReverseV2Op>(op);
-  auto input_type =
+  RankedTensorType input_type =
       tf_reverse_op.tensor().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tf_reverse_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!input_type || !output_type) return failure();
 
@@ -1980,12 +1996,12 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_fakequant_op = cast<TF::FakeQuantWithMinMaxArgsOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_fakequant_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto result =
+  llvm::Optional<Value> result =
       convertFakeQuantOp(rewriter, op, output_type, tf_fakequant_op.inputs(),
                          tf_fakequant_op.minAttr().getValueAsDouble(),
                          tf_fakequant_op.maxAttr().getValueAsDouble(),
@@ -2003,7 +2019,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_fakequant_op = cast<TF::FakeQuantWithMinMaxVarsOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tf_fakequant_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -2022,7 +2038,7 @@
   int64_t min_val = min_elems.getValue<IntegerAttr>(0).getInt();
   int64_t max_val = max_elems.getValue<IntegerAttr>(0).getInt();
 
-  auto result = convertFakeQuantOp(
+  llvm::Optional<Value> result = convertFakeQuantOp(
       rewriter, op, output_type, tf_fakequant_op.inputs(), min_val, max_val,
       tf_fakequant_op.num_bitsAttr().getInt(),
       tf_fakequant_op.narrow_rangeAttr().getValue());
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc b/tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc
index 895a6a4..c82d6ee 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc
@@ -167,8 +167,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_relu_op = cast<TFL::ReluOp>(op);
 
-  auto input_type = tfl_relu_op.x().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType input_type =
+      tfl_relu_op.x().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tfl_relu_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type || !output_type) return failure();
@@ -186,20 +187,22 @@
 
   Value output;
   if (output_is_qtype) {
-    auto rescale_type =
+    RankedTensorType rescale_type =
         RankedTensorType::get(output_type.getShape(), rewriter.getI32Type());
-    auto input_qtype = input_type.getElementType()
-                           .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto output_qtype = output_type.getElementType()
-                            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_qtype =
+        input_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType output_qtype =
+        output_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
 
-    auto op1_rescale_in = buildRescaleToInt32(rewriter, op, tfl_relu_op.x(),
-                                              1.0f, input_qtype.getZeroPoint());
+    Value op1_rescale_in = buildRescaleToInt32(
+        rewriter, op, tfl_relu_op.x(), 1.0f, input_qtype.getZeroPoint());
     auto op2_relun_op1 = rewriter.create<tosa::ReluNOp>(
         op->getLoc(), rescale_type, op1_rescale_in,
         rewriter.getI64IntegerAttr(std::numeric_limits<int32_t>::max()),
         rewriter.getF32FloatAttr(0.0f));
-    auto op3_rescale_op2 = buildRescaleFromInt32(
+    Value op3_rescale_op2 = buildRescaleFromInt32(
         rewriter, op, output_type, op2_relun_op1.getResult(), 1.0f,
         output_qtype.getZeroPoint());
 
@@ -221,8 +224,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_relu6_op = cast<TFL::Relu6Op>(op);
 
-  auto input_type = tfl_relu6_op.x().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType input_type =
+      tfl_relu6_op.x().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tfl_relu6_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type || !output_type) return failure();
@@ -240,21 +244,23 @@
 
   Value output;
   if (output_is_qtype && input_is_qtype) {
-    auto rescale_type =
+    RankedTensorType rescale_type =
         RankedTensorType::get(output_type.getShape(), rewriter.getI32Type());
-    auto input_qtype = input_type.getElementType()
-                           .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto output_qtype = output_type.getElementType()
-                            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_qtype =
+        input_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType output_qtype =
+        output_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
     int64_t rescaled_6 = std::llround(6.0f / input_qtype.getScale()) +
                          input_qtype.getZeroPoint();
 
-    auto op1_rescale_in = buildRescaleToInt32(rewriter, op, tfl_relu6_op.x(),
-                                              1.0f, input_qtype.getZeroPoint());
+    Value op1_rescale_in = buildRescaleToInt32(
+        rewriter, op, tfl_relu6_op.x(), 1.0f, input_qtype.getZeroPoint());
     auto op2_relun_op1 = rewriter.create<tosa::ReluNOp>(
         op->getLoc(), rescale_type, op1_rescale_in,
         rewriter.getI64IntegerAttr(rescaled_6), rewriter.getF32FloatAttr(0.0f));
-    auto op3_rescale_op2 = buildRescaleFromInt32(
+    Value op3_rescale_op2 = buildRescaleFromInt32(
         rewriter, op, output_type, op2_relun_op1.getResult(), 1.0f,
         output_qtype.getZeroPoint());
 
@@ -276,9 +282,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_equal_op = cast<TFL::EqualOp>(op);
 
-  auto input_x_type = tfl_equal_op.x().getType().dyn_cast<RankedTensorType>();
-  auto input_y_type = tfl_equal_op.y().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType input_x_type =
+      tfl_equal_op.x().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_y_type =
+      tfl_equal_op.y().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tfl_equal_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_x_type || !input_y_type || !output_type) return failure();
@@ -299,10 +307,12 @@
 
   Value output;
   if (output_is_qtype && input_x_is_qtype && input_y_is_qtype) {
-    auto input_x_qtype = input_x_type.getElementType()
-                             .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto input_y_qtype = input_y_type.getElementType()
-                             .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_x_qtype =
+        input_x_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_y_qtype =
+        input_y_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
 
     if (input_x_qtype.getScale() != input_y_qtype.getScale() ||
         input_x_qtype.getZeroPoint() != input_y_qtype.getZeroPoint()) {
@@ -311,9 +321,9 @@
           "must be the same");
     }
 
-    auto op1_rescale_x = buildRescaleToInt32(
+    Value op1_rescale_x = buildRescaleToInt32(
         rewriter, op, tfl_equal_op.x(), 1.0f, input_x_qtype.getZeroPoint());
-    auto op2_rescale_y = buildRescaleToInt32(
+    Value op2_rescale_y = buildRescaleToInt32(
         rewriter, op, tfl_equal_op.y(), 1.0f, input_y_qtype.getZeroPoint());
     auto op3_equal_op1_op2 = rewriter.create<tosa::EqualOp>(
         op->getLoc(), output_type, op1_rescale_x, op2_rescale_y);
@@ -334,11 +344,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_not_equal_op = cast<TFL::NotEqualOp>(op);
 
-  auto input_lhs_type =
+  RankedTensorType input_lhs_type =
       tfl_not_equal_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type =
+  RankedTensorType input_rhs_type =
       tfl_not_equal_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_not_equal_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return failure();
@@ -359,10 +369,12 @@
 
   Value output;
   if (output_is_qtype && input_lhs_is_qtype && input_rhs_is_qtype) {
-    auto input_lhs_qtype = input_lhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto input_rhs_qtype = input_rhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_lhs_qtype =
+        input_lhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_rhs_qtype =
+        input_rhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
 
     if (input_lhs_qtype.getScale() != input_rhs_qtype.getScale() ||
         input_lhs_qtype.getZeroPoint() != input_rhs_qtype.getZeroPoint()) {
@@ -371,10 +383,10 @@
           "must be the same");
     }
 
-    auto op1_rescale_lhs =
+    Value op1_rescale_lhs =
         buildRescaleToInt32(rewriter, op, tfl_not_equal_op.lhs(), 1.0f,
                             input_lhs_qtype.getZeroPoint());
-    auto op2_rescale_rhs =
+    Value op2_rescale_rhs =
         buildRescaleToInt32(rewriter, op, tfl_not_equal_op.rhs(), 1.0f,
                             input_rhs_qtype.getZeroPoint());
     auto op3_equal_op1_op2 = rewriter.create<tosa::EqualOp>(
@@ -401,11 +413,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_greater_op = cast<TFL::GreaterOp>(op);
 
-  auto input_lhs_type =
+  RankedTensorType input_lhs_type =
       tfl_greater_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type =
+  RankedTensorType input_rhs_type =
       tfl_greater_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_greater_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return failure();
@@ -426,10 +438,12 @@
 
   Value output;
   if (output_is_qtype && input_lhs_is_qtype && input_rhs_is_qtype) {
-    auto input_lhs_qtype = input_lhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto input_rhs_qtype = input_rhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_lhs_qtype =
+        input_lhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_rhs_qtype =
+        input_rhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
 
     if (input_lhs_qtype.getScale() != input_rhs_qtype.getScale() ||
         input_lhs_qtype.getZeroPoint() != input_rhs_qtype.getZeroPoint()) {
@@ -438,10 +452,10 @@
           "must be the same");
     }
 
-    auto op1_rescale_lhs =
+    Value op1_rescale_lhs =
         buildRescaleToInt32(rewriter, op, tfl_greater_op.lhs(), 1.0f,
                             input_lhs_qtype.getZeroPoint());
-    auto op2_rescale_rhs =
+    Value op2_rescale_rhs =
         buildRescaleToInt32(rewriter, op, tfl_greater_op.rhs(), 1.0f,
                             input_rhs_qtype.getZeroPoint());
     auto op3_greater_op1_op2 = rewriter.create<tosa::GreaterOp>(
@@ -463,11 +477,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_greater_equal_op = cast<TFL::GreaterEqualOp>(op);
 
-  auto input_lhs_type =
+  RankedTensorType input_lhs_type =
       tfl_greater_equal_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type =
+  RankedTensorType input_rhs_type =
       tfl_greater_equal_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_greater_equal_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return failure();
@@ -488,10 +502,12 @@
 
   Value output;
   if (output_is_qtype && input_lhs_is_qtype && input_rhs_is_qtype) {
-    auto input_lhs_qtype = input_lhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto input_rhs_qtype = input_rhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_lhs_qtype =
+        input_lhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_rhs_qtype =
+        input_rhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
 
     if (input_lhs_qtype.getScale() != input_rhs_qtype.getScale() ||
         input_lhs_qtype.getZeroPoint() != input_rhs_qtype.getZeroPoint()) {
@@ -500,10 +516,10 @@
           "must be the same");
     }
 
-    auto op1_rescale_lhs =
+    Value op1_rescale_lhs =
         buildRescaleToInt32(rewriter, op, tfl_greater_equal_op.lhs(), 1.0f,
                             input_lhs_qtype.getZeroPoint());
-    auto op2_rescale_rhs =
+    Value op2_rescale_rhs =
         buildRescaleToInt32(rewriter, op, tfl_greater_equal_op.rhs(), 1.0f,
                             input_rhs_qtype.getZeroPoint());
     auto op3_greater_equal_op1_op2 = rewriter.create<tosa::GreaterEqualOp>(
@@ -527,9 +543,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_add_op = cast<TFL::AddOp>(op);
 
-  auto input_lhs_type = tfl_add_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type = tfl_add_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType input_lhs_type =
+      tfl_add_op.lhs().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_rhs_type =
+      tfl_add_op.rhs().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tfl_add_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return failure();
@@ -550,14 +568,17 @@
 
   Value output;
   if (output_is_qtype && input_lhs_is_qtype && input_rhs_is_qtype) {
-    auto rescale_type =
+    RankedTensorType rescale_type =
         RankedTensorType::get(output_type.getShape(), rewriter.getI32Type());
-    auto input_lhs_qtype = input_lhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto input_rhs_qtype = input_rhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto output_qtype = output_type.getElementType()
-                            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_lhs_qtype =
+        input_lhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_rhs_qtype =
+        input_rhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType output_qtype =
+        output_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
 
     // Following quantization described in tensorflow/lite/kernels/add.cc
     // In details it does:
@@ -581,15 +602,15 @@
     double output_rescale_scale =
         max_scale_2x / (output_scale * static_cast<double>(1 << input_shift));
 
-    auto op1_rescale_lhs =
+    Value op1_rescale_lhs =
         buildRescaleToInt32(rewriter, op, tfl_add_op.lhs(), lhs_rescale_scale,
                             input_lhs_qtype.getZeroPoint());
-    auto op2_rescale_rhs =
+    Value op2_rescale_rhs =
         buildRescaleToInt32(rewriter, op, tfl_add_op.rhs(), rhs_rescale_scale,
                             input_rhs_qtype.getZeroPoint());
     auto op3_add_op1_op2 = rewriter.create<tosa::AddOp>(
         op->getLoc(), rescale_type, op1_rescale_lhs, op2_rescale_rhs);
-    auto op4_rescale_op3 = buildRescaleFromInt32(
+    Value op4_rescale_op3 = buildRescaleFromInt32(
         rewriter, op, output_type, op3_add_op1_op2.getResult(),
         output_rescale_scale, output_qtype.getZeroPoint());
     output = op4_rescale_op3;
@@ -603,7 +624,7 @@
   auto fused_activation_fn = tfl_add_op.fused_activation_functionAttr();
 
   if (fused_activation_fn) {
-    auto fused_activation_val =
+    llvm::Optional<Value> fused_activation_val =
         convertFusedActivation(rewriter, op, output, fused_activation_fn);
 
     if (!fused_activation_val) return failure();
@@ -620,9 +641,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_sub_op = cast<TFL::SubOp>(op);
 
-  auto input_lhs_type = tfl_sub_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type = tfl_sub_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType input_lhs_type =
+      tfl_sub_op.lhs().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_rhs_type =
+      tfl_sub_op.rhs().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tfl_sub_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return failure();
@@ -643,13 +666,15 @@
 
   Value output;
   if (output_is_qtype && input_lhs_is_qtype && input_rhs_is_qtype) {
-    auto rescale_type =
+    RankedTensorType rescale_type =
         RankedTensorType::get(output_type.getShape(), rewriter.getI32Type());
-    auto input_lhs_qtype = input_lhs_type.getElementType()
-                               .cast<mlir::quant::UniformQuantizedType>();
-    auto input_rhs_qtype = input_rhs_type.getElementType()
-                               .cast<mlir::quant::UniformQuantizedType>();
-    auto output_qtype =
+    UniformQuantizedType input_lhs_qtype =
+        input_lhs_type.getElementType()
+            .cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_rhs_qtype =
+        input_rhs_type.getElementType()
+            .cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType output_qtype =
         output_type.getElementType().cast<mlir::quant::UniformQuantizedType>();
 
     // Following quantization described in tensorflow/lite/kernels/add.cc
@@ -674,15 +699,15 @@
     double output_rescale_scale =
         max_scale_2x / (output_scale * static_cast<double>(1 << input_shift));
 
-    auto op1_rescale_lhs =
+    Value op1_rescale_lhs =
         buildRescaleToInt32(rewriter, op, tfl_sub_op.lhs(), lhs_rescale_scale,
                             input_lhs_qtype.getZeroPoint());
-    auto op2_rescale_rhs =
+    Value op2_rescale_rhs =
         buildRescaleToInt32(rewriter, op, tfl_sub_op.rhs(), rhs_rescale_scale,
                             input_rhs_qtype.getZeroPoint());
     auto op3_sub_op1_op2 = rewriter.create<tosa::SubOp>(
         op->getLoc(), rescale_type, op1_rescale_lhs, op2_rescale_rhs);
-    auto op4_rescale_op3 = buildRescaleFromInt32(
+    Value op4_rescale_op3 = buildRescaleFromInt32(
         rewriter, op, output_type, op3_sub_op1_op2.getResult(),
         output_rescale_scale, output_qtype.getZeroPoint());
     output = op4_rescale_op3;
@@ -696,7 +721,7 @@
   auto fused_activation_fn = tfl_sub_op.fused_activation_functionAttr();
 
   if (fused_activation_fn) {
-    auto fused_activation_val =
+    llvm::Optional<Value> fused_activation_val =
         convertFusedActivation(rewriter, op, output, fused_activation_fn);
 
     if (!fused_activation_val) return failure();
@@ -713,15 +738,15 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_mul_op = cast<TFL::MulOp>(op);
 
-  auto result = convertMultiplyOp(rewriter, op, tfl_mul_op.getResult(),
-                                  tfl_mul_op.lhs(), tfl_mul_op.rhs());
+  llvm::Optional<Value> result = convertMultiplyOp(
+      rewriter, op, tfl_mul_op.getResult(), tfl_mul_op.lhs(), tfl_mul_op.rhs());
 
   if (!result) return failure();
 
   auto fused_activation_fn = tfl_mul_op.fused_activation_functionAttr();
 
   if (fused_activation_fn) {
-    auto fused_activation_val = convertFusedActivation(
+    llvm::Optional<Value> fused_activation_val = convertFusedActivation(
         rewriter, op, result.getValue(), fused_activation_fn);
 
     if (!fused_activation_val) return failure();
@@ -738,8 +763,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_square_op = cast<TFL::SquareOp>(op);
 
-  auto result = convertMultiplyOp(rewriter, op, tfl_square_op.getResult(),
-                                  tfl_square_op.x(), tfl_square_op.x());
+  llvm::Optional<Value> result =
+      convertMultiplyOp(rewriter, op, tfl_square_op.getResult(),
+                        tfl_square_op.x(), tfl_square_op.x());
 
   if (!result) return failure();
 
@@ -751,7 +777,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_squared_op = cast<TFL::SquaredDifferenceOp>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertSquaredDifferenceOp(rewriter, op, tfl_squared_op.getResult(),
                                  tfl_squared_op.lhs(), tfl_squared_op.rhs());
 
@@ -765,14 +791,15 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_round_op = cast<TFL::RoundOp>(op);
 
-  auto input_type = tfl_round_op.x().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tfl_round_op.x().getType().dyn_cast<RankedTensorType>();
   if (!input_type) {
     return op->emitOpError("Round: input not ranked tensor type");
   }
 
   if (input_type.getElementType().isa<FloatType>()) {
-    auto result = convertRoundOp(rewriter, op, tfl_round_op.getResult(),
-                                 tfl_round_op.x());
+    llvm::Optional<Value> result = convertRoundOp(
+        rewriter, op, tfl_round_op.getResult(), tfl_round_op.x());
 
     if (!result) return failure();
 
@@ -791,7 +818,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_div_op = cast<TFL::DivOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_div_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -805,7 +832,7 @@
                                    reciprocal_op.getResult(), 0);
 
   if (fused_activation_fn) {
-    auto fused_activation_val = convertFusedActivation(
+    llvm::Optional<Value> fused_activation_val = convertFusedActivation(
         rewriter, op, mul_op.getResult(), fused_activation_fn);
 
     if (!fused_activation_val) return failure();
@@ -823,9 +850,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_max_op = cast<TFL::MaximumOp>(op);
 
-  auto input_lhs_type = tfl_max_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type = tfl_max_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType input_lhs_type =
+      tfl_max_op.lhs().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_rhs_type =
+      tfl_max_op.rhs().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tfl_max_op.getResult().getType().dyn_cast<RankedTensorType>();
 
   // Not a ranked tensor output
@@ -847,16 +876,16 @@
 
   Value output;
   if (output_is_qtype) {
-    auto rescale_type =
+    RankedTensorType rescale_type =
         RankedTensorType::get(output_type.getShape(), rewriter.getI32Type());
 
-    auto op1_rescale_lhs =
+    Value op1_rescale_lhs =
         buildRescaleToInt32(rewriter, op, tfl_max_op.lhs(), 1.0f, 0);
-    auto op2_rescale_rhs =
+    Value op2_rescale_rhs =
         buildRescaleToInt32(rewriter, op, tfl_max_op.rhs(), 1.0f, 0);
     auto op3_max_op1_op2 = rewriter.create<tosa::MaximumOp>(
         op->getLoc(), rescale_type, op1_rescale_lhs, op2_rescale_rhs);
-    auto op4_rescale_op3 = buildRescaleFromInt32(
+    Value op4_rescale_op3 = buildRescaleFromInt32(
         rewriter, op, output_type, op3_max_op1_op2.getResult(), 1.0f, 0);
 
     output = op4_rescale_op3;
@@ -876,9 +905,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_min_op = cast<TFL::MinimumOp>(op);
 
-  auto input_lhs_type = tfl_min_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type = tfl_min_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType input_lhs_type =
+      tfl_min_op.lhs().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_rhs_type =
+      tfl_min_op.rhs().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType output_type =
       tfl_min_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return failure();
@@ -899,16 +930,16 @@
 
   Value output;
   if (output_is_qtype) {
-    auto rescale_type =
+    RankedTensorType rescale_type =
         RankedTensorType::get(output_type.getShape(), rewriter.getI32Type());
 
-    auto op1_rescale_lhs =
+    Value op1_rescale_lhs =
         buildRescaleToInt32(rewriter, op, tfl_min_op.lhs(), 1.0f, 0);
-    auto op2_rescale_rhs =
+    Value op2_rescale_rhs =
         buildRescaleToInt32(rewriter, op, tfl_min_op.rhs(), 1.0f, 0);
     auto op3_min_op1_op2 = rewriter.create<tosa::MinimumOp>(
         op->getLoc(), rescale_type, op1_rescale_lhs, op2_rescale_rhs);
-    auto op4_rescale_op3 = buildRescaleFromInt32(
+    Value op4_rescale_op3 = buildRescaleFromInt32(
         rewriter, op, output_type, op3_min_op1_op2.getResult(), 1.0f, 0);
 
     output = op4_rescale_op3;
@@ -928,8 +959,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_floordiv_op = cast<TFL::FloorDivOp>(op);
 
-  auto result = convertFloorDivOp(rewriter, op, tfl_floordiv_op.getResult(),
-                                  tfl_floordiv_op.lhs(), tfl_floordiv_op.rhs());
+  llvm::Optional<Value> result =
+      convertFloorDivOp(rewriter, op, tfl_floordiv_op.getResult(),
+                        tfl_floordiv_op.lhs(), tfl_floordiv_op.rhs());
 
   if (!result) return failure();
 
@@ -942,8 +974,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_floormod_op = cast<TFL::FloorModOp>(op);
 
-  auto result = convertFloorModOp(rewriter, op, tfl_floormod_op.getResult(),
-                                  tfl_floormod_op.lhs(), tfl_floormod_op.rhs());
+  llvm::Optional<Value> result =
+      convertFloorModOp(rewriter, op, tfl_floormod_op.getResult(),
+                        tfl_floormod_op.lhs(), tfl_floormod_op.rhs());
 
   if (!result) return failure();
 
@@ -956,7 +989,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_addn_op = cast<TFL::AddNOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_addn_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -981,9 +1014,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_avgpool_op = cast<TFL::AveragePool2DOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tfl_avgpool_op.input().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_avgpool_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1014,7 +1047,7 @@
     // Pooling has no non-unit dilation
     ArrayAttr dilation = rewriter.getI64ArrayAttr({1, 1});
 
-    auto filter_type = RankedTensorType::get(
+    RankedTensorType filter_type = RankedTensorType::get(
         llvm::makeArrayRef<int64_t>(i64array), rewriter.getIntegerType(64));
 
     // TFLite doesn't support explicit padding
@@ -1035,9 +1068,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_maxpool_op = cast<TFL::MaxPool2DOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tfl_maxpool_op.input().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_maxpool_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1068,7 +1101,7 @@
     // Pooling has no non-unit dilation
     ArrayAttr dilation = rewriter.getI64ArrayAttr({1, 1});
 
-    auto filter_type = RankedTensorType::get(
+    RankedTensorType filter_type = RankedTensorType::get(
         llvm::makeArrayRef<int64_t>(i64array), rewriter.getIntegerType(64));
 
     // TFLite doesn't support explicit padding
@@ -1089,11 +1122,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_conv2d_op = cast<TFL::Conv2DOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tfl_conv2d_op.input().getType().dyn_cast<RankedTensorType>();
-  auto filter_type =
+  RankedTensorType filter_type =
       tfl_conv2d_op.filter().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_conv2d_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type) return failure();
@@ -1160,7 +1193,7 @@
   auto fused_activation_fn = tfl_conv2d_op.fused_activation_functionAttr();
 
   if (fused_activation_fn) {
-    auto fused_activation_val = convertFusedActivation(
+    llvm::Optional<Value> fused_activation_val = convertFusedActivation(
         rewriter, op, conv2d_output, fused_activation_fn);
 
     if (!fused_activation_val) return failure();
@@ -1178,10 +1211,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_conv_op = cast<TFL::TransposeConvOp>(op);
 
-  auto input_type = tfl_conv_op.input().getType().dyn_cast<RankedTensorType>();
-  auto filter_type =
+  RankedTensorType input_type =
+      tfl_conv_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType filter_type =
       tfl_conv_op.weights().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_conv_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type) return failure();
@@ -1289,11 +1323,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_conv2d_op = cast<TFL::DepthwiseConv2DOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tfl_conv2d_op.input().getType().dyn_cast<RankedTensorType>();
-  auto filter_type =
+  RankedTensorType filter_type =
       tfl_conv2d_op.filter().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_conv2d_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type) return failure();
@@ -1367,7 +1401,7 @@
   a2_reshape_dims.push_back(a1_transpose_dims[2] / depth_multiplier.getInt());
   a2_reshape_dims.push_back(depth_multiplier.getInt());
 
-  auto a1_filter_transpose_perms =
+  Value a1_filter_transpose_perms =
       get1DConstTensor<tosa::ConstOp, int32_t>(rewriter, op, {1, 2, 3, 0});
   auto a1_filter_transpose_op = rewriter.create<tosa::TransposeOp>(
       op->getLoc(),
@@ -1402,7 +1436,7 @@
   auto fused_activation_fn = tfl_conv2d_op.fused_activation_functionAttr();
 
   if (fused_activation_fn) {
-    auto fused_activation_val = convertFusedActivation(
+    llvm::Optional<Value> fused_activation_val = convertFusedActivation(
         rewriter, op, conv2d_output, fused_activation_fn);
 
     if (!fused_activation_val) return failure();
@@ -1420,14 +1454,17 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_fc_op = cast<TFL::FullyConnectedOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_fc_op.getResult(0).getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto input_type = tfl_fc_op.input().getType().dyn_cast<RankedTensorType>();
-  auto filter_type = tfl_fc_op.filter().getType().dyn_cast<RankedTensorType>();
-  auto bias_type = tfl_fc_op.bias().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tfl_fc_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType filter_type =
+      tfl_fc_op.filter().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType bias_type =
+      tfl_fc_op.bias().getType().dyn_cast<RankedTensorType>();
   if (!input_type || !filter_type) return failure();
 
   bool input_is_qtype =
@@ -1459,8 +1496,8 @@
     int64_t num_batch = input_type.getNumElements() / num_elems;
     SmallVector<int64_t, 2> shape_vals({num_batch, num_elems});
 
-    auto reshape_type = RankedTensorType::get(ArrayRef<int64_t>(shape_vals),
-                                              input_type.getElementType());
+    RankedTensorType reshape_type = RankedTensorType::get(
+        ArrayRef<int64_t>(shape_vals), input_type.getElementType());
     auto reshape_op = rewriter.create<tosa::ReshapeOp>(
         op->getLoc(), reshape_type, tfl_fc_op.input(),
         rewriter.getI64ArrayAttr(shape_vals));
@@ -1474,8 +1511,8 @@
     // value. TOSA requires bias to be an array of output_channel_count values,
     // so create a constant of the appropriate number and type of zeros.
     SmallVector<int64_t, 1> bias_shape({filter_type.getShape()[0]});
-    auto bias_type = RankedTensorType::get(ArrayRef<int64_t>(bias_shape),
-                                           input_type.getElementType());
+    RankedTensorType bias_type = RankedTensorType::get(
+        ArrayRef<int64_t>(bias_shape), input_type.getElementType());
 
     DenseElementsAttr bias_attr;
     if (input_type.getElementType().isa<FloatType>()) {
@@ -1518,7 +1555,7 @@
   auto fused_activation_fn = tfl_fc_op.fused_activation_functionAttr();
 
   if (fused_activation_fn) {
-    auto fused_activation_val =
+    llvm::Optional<Value> fused_activation_val =
         convertFusedActivation(rewriter, op, fc_output, fused_activation_fn);
 
     if (!fused_activation_val) return failure();
@@ -1548,7 +1585,7 @@
   }
   int32_t axis = axis_attr.getInt();
 
-  auto result =
+  llvm::Optional<Value> result =
       convertConcatV2Op(rewriter, op, tfl_concat_op.getResult(), values, axis);
 
   if (!result) return failure();
@@ -1561,7 +1598,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_reshape_op = cast<TFL::ReshapeOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_reshape_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1581,12 +1618,14 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_rank_op = cast<TFL::RankOp>(op);
 
-  auto input_type = tfl_rank_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tfl_rank_op.input().getType().dyn_cast<RankedTensorType>();
   if (!input_type) return failure();
 
   int32_t rank = input_type.getRank();
 
-  auto rank_type = RankedTensorType::get({1}, rewriter.getIntegerType(32));
+  RankedTensorType rank_type =
+      RankedTensorType::get({1}, rewriter.getIntegerType(32));
   auto rank_attr = DenseElementsAttr::get(rank_type, {rank});
   auto rank_const =
       rewriter.create<tosa::ConstOp>(op->getLoc(), rank_type, rank_attr);
@@ -1600,12 +1639,13 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_shape_op = cast<TFL::ShapeOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_shape_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto input_type = tfl_shape_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tfl_shape_op.input().getType().dyn_cast<RankedTensorType>();
   if (!input_type) return failure();
 
   auto input_shape = input_type.getShape();
@@ -1615,7 +1655,7 @@
     shape_arr.emplace_back(input_shape[i]);
   }
 
-  auto shape_type = RankedTensorType::get(
+  RankedTensorType shape_type = RankedTensorType::get(
       {static_cast<int32_t>(shape_arr.size())}, rewriter.getIntegerType(32));
   auto shape_attr = DenseElementsAttr::get(
       shape_type, llvm::makeArrayRef<int32_t>(shape_arr));
@@ -1631,7 +1671,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_expanddims_op = cast<TFL::ExpandDimsOp>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertExpandDimsOp(rewriter, op, tfl_expanddims_op.getResult(),
                           tfl_expanddims_op.input(), tfl_expanddims_op.dim());
 
@@ -1653,8 +1693,9 @@
     squeeze_dims.emplace_back(squeeze_dim.dyn_cast<IntegerAttr>().getInt());
   }
 
-  auto result = convertSqueezeOp(rewriter, op, tfl_squeeze_op.getResult(),
-                                 tfl_squeeze_op.input(), squeeze_dims);
+  llvm::Optional<Value> result =
+      convertSqueezeOp(rewriter, op, tfl_squeeze_op.getResult(),
+                       tfl_squeeze_op.input(), squeeze_dims);
 
   if (!result) return failure();
 
@@ -1667,7 +1708,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_fill_op = cast<TFL::FillOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_fill_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1686,8 +1727,8 @@
   if (!matchPattern(tfl_fill_op.input(), m_Constant(&value_elem)))
     return failure();
 
-  auto fill_type = RankedTensorType::get(ArrayRef<int64_t>(dims_vals),
-                                         value_elem.getType().getElementType());
+  RankedTensorType fill_type = RankedTensorType::get(
+      ArrayRef<int64_t>(dims_vals), value_elem.getType().getElementType());
   DenseElementsAttr fill_attr;
 
   // Convert to a compatible zero type.
@@ -1715,7 +1756,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_any_op = cast<TFL::ReduceAnyOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_any_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1727,8 +1768,8 @@
   auto keep_dims_attr = tfl_any_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceAnyOp(rewriter, op, output_type,
-                                   tfl_any_op.input(), axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceAnyOp(
+      rewriter, op, output_type, tfl_any_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1741,7 +1782,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_max_op = cast<TFL::ReduceMaxOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_max_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1753,8 +1794,8 @@
   auto keep_dims_attr = tfl_max_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceMaxOp(rewriter, op, output_type,
-                                   tfl_max_op.input(), axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceMaxOp(
+      rewriter, op, output_type, tfl_max_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1767,7 +1808,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_min_op = cast<TFL::ReduceMinOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_min_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1779,8 +1820,8 @@
   auto keep_dims_attr = tfl_min_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceMinOp(rewriter, op, output_type,
-                                   tfl_min_op.input(), axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceMinOp(
+      rewriter, op, output_type, tfl_min_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1793,7 +1834,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_prod_op = cast<TFL::ReduceProdOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_prod_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1805,8 +1846,8 @@
   auto keep_dims_attr = tfl_prod_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceProdOp(rewriter, op, output_type,
-                                    tfl_prod_op.input(), axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceProdOp(
+      rewriter, op, output_type, tfl_prod_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1819,7 +1860,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_mean_op = cast<TFL::MeanOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_mean_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1831,8 +1872,8 @@
   auto keep_dims_attr = tfl_mean_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceMeanOp(rewriter, op, output_type,
-                                    tfl_mean_op.input(), axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceMeanOp(
+      rewriter, op, output_type, tfl_mean_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1845,7 +1886,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_sum_op = cast<TFL::SumOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_sum_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -1857,8 +1898,8 @@
   auto keep_dims_attr = tfl_sum_op.keep_dimsAttr();
   if (keep_dims_attr) keep_dims = keep_dims_attr.getValue();
 
-  auto result = convertReduceSumOp(rewriter, op, output_type,
-                                   tfl_sum_op.input(), axes_elems, keep_dims);
+  llvm::Optional<Value> result = convertReduceSumOp(
+      rewriter, op, output_type, tfl_sum_op.input(), axes_elems, keep_dims);
 
   if (!result) return failure();
 
@@ -1871,7 +1912,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_elu_op = cast<TFL::EluOp>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertEluOp(rewriter, op, tfl_elu_op.getResult(), tfl_elu_op.x());
 
   if (!result) return failure();
@@ -1885,8 +1926,8 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_softmax_op = cast<TFL::SoftmaxOp>(op);
 
-  auto result = convertSoftmaxOp(rewriter, op, tfl_softmax_op.getResult(),
-                                 tfl_softmax_op.input());
+  llvm::Optional<Value> result = convertSoftmaxOp(
+      rewriter, op, tfl_softmax_op.getResult(), tfl_softmax_op.input());
 
   if (!result) return failure();
 
@@ -1899,8 +1940,8 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_logsoftmax_op = cast<TFL::LogSoftmaxOp>(op);
 
-  auto result = convertLogSoftmaxOp(rewriter, op, tfl_logsoftmax_op.getResult(),
-                                    tfl_logsoftmax_op.input());
+  llvm::Optional<Value> result = convertLogSoftmaxOp(
+      rewriter, op, tfl_logsoftmax_op.getResult(), tfl_logsoftmax_op.input());
 
   if (!result) return failure();
 
@@ -1913,7 +1954,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_slice_op = cast<TFL::SliceOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_slice_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1945,7 +1986,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_tile_op = cast<TFL::TileOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_tile_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1968,7 +2009,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_transpose_op = cast<TFL::TransposeOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_transpose_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -1994,7 +2035,7 @@
   }
   int32_t axis_i32 = axis_attr.getInt();
 
-  auto result =
+  llvm::Optional<Value> result =
       convertPackOp(rewriter, op, tfl_pack_op.getResult(), inputs, axis_i32);
 
   if (!result) return failure();
@@ -2016,7 +2057,8 @@
   }
   int32_t axis_i32 = axis_attr.getInt();
 
-  auto results = convertUnpackOp(rewriter, op, tfl_unpack_op.input(), axis_i32);
+  llvm::Optional<ValueRange> results =
+      convertUnpackOp(rewriter, op, tfl_unpack_op.input(), axis_i32);
 
   if (!results) return failure();
 
@@ -2049,8 +2091,9 @@
   // an integer attribute in TFLite MLIR.
   int32_t axis = axisAttrElems.getValue<IntegerAttr>({}).getInt();
 
-  auto results = convertSplitOp(rewriter, op, tfl_split_op.getResult(0),
-                                tfl_split_op.value(), num_split, axis);
+  llvm::Optional<ValueRange> results =
+      convertSplitOp(rewriter, op, tfl_split_op.getResult(0),
+                     tfl_split_op.value(), num_split, axis);
 
   if (!results) return failure();
 
@@ -2086,8 +2129,9 @@
   // an integer attribute in TFLite MLIR.
   int32_t axis = axisAttrElems.getValue<IntegerAttr>(0).getInt();
 
-  auto results = convertSplitVOp(rewriter, op, tfl_splitv_op.getResult(0),
-                                 tfl_splitv_op.value(), size_split, axis);
+  llvm::Optional<ValueRange> results =
+      convertSplitVOp(rewriter, op, tfl_splitv_op.getResult(0),
+                      tfl_splitv_op.value(), size_split, axis);
 
   if (!results) return failure();
 
@@ -2100,11 +2144,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_less_op = cast<TFL::LessOp>(op);
 
-  auto input_lhs_type =
+  RankedTensorType input_lhs_type =
       tfl_less_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type =
+  RankedTensorType input_rhs_type =
       tfl_less_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_less_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return failure();
@@ -2125,10 +2169,12 @@
 
   Value output;
   if (output_is_qtype) {
-    auto input_lhs_qtype = input_lhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto input_rhs_qtype = input_rhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_lhs_qtype =
+        input_lhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_rhs_qtype =
+        input_rhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
 
     if (input_lhs_qtype.getScale() != input_rhs_qtype.getScale() ||
         input_lhs_qtype.getZeroPoint() != input_rhs_qtype.getZeroPoint()) {
@@ -2137,9 +2183,9 @@
           "must be the same");
     }
 
-    auto op1_rescale_lhs = buildRescaleToInt32(
+    Value op1_rescale_lhs = buildRescaleToInt32(
         rewriter, op, tfl_less_op.lhs(), 1.0f, input_lhs_qtype.getZeroPoint());
-    auto op2_rescale_rhs = buildRescaleToInt32(
+    Value op2_rescale_rhs = buildRescaleToInt32(
         rewriter, op, tfl_less_op.rhs(), 1.0f, input_rhs_qtype.getZeroPoint());
     auto op3_greater_equal_op1_op2 = rewriter.create<tosa::GreaterEqualOp>(
         op->getLoc(), output_type, op1_rescale_lhs, op2_rescale_rhs);
@@ -2164,11 +2210,11 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_less_equal_op = cast<TFL::LessEqualOp>(op);
 
-  auto input_lhs_type =
+  RankedTensorType input_lhs_type =
       tfl_less_equal_op.lhs().getType().dyn_cast<RankedTensorType>();
-  auto input_rhs_type =
+  RankedTensorType input_rhs_type =
       tfl_less_equal_op.rhs().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_less_equal_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_lhs_type || !input_rhs_type || !output_type) return failure();
@@ -2189,10 +2235,12 @@
 
   Value output;
   if (output_is_qtype) {
-    auto input_lhs_qtype = input_lhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
-    auto input_rhs_qtype = input_rhs_type.getElementType()
-                               .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_lhs_qtype =
+        input_lhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
+    UniformQuantizedType input_rhs_qtype =
+        input_rhs_type.getElementType()
+            .dyn_cast<mlir::quant::UniformQuantizedType>();
 
     if (input_lhs_qtype.getScale() != input_rhs_qtype.getScale() ||
         input_lhs_qtype.getZeroPoint() != input_rhs_qtype.getZeroPoint()) {
@@ -2201,10 +2249,10 @@
           "must be the same");
     }
 
-    auto op1_rescale_lhs =
+    Value op1_rescale_lhs =
         buildRescaleToInt32(rewriter, op, tfl_less_equal_op.lhs(), 1.0f,
                             input_lhs_qtype.getZeroPoint());
-    auto op2_rescale_rhs =
+    Value op2_rescale_rhs =
         buildRescaleToInt32(rewriter, op, tfl_less_equal_op.rhs(), 1.0f,
                             input_rhs_qtype.getZeroPoint());
     auto op3_greater_op1_op2 = rewriter.create<tosa::GreaterOp>(
@@ -2231,7 +2279,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_pad_op = cast<TFL::PadOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_pad_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
@@ -2247,13 +2295,13 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_resize_op = cast<TFL::ResizeBilinearOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_resize_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto result = convertResizeOp(rewriter, op, output_type,
-                                tfl_resize_op.input(), StringRef("BILINEAR"));
+  llvm::Optional<Value> result = convertResizeOp(
+      rewriter, op, output_type, tfl_resize_op.input(), StringRef("BILINEAR"));
 
   if (!result) return failure();
 
@@ -2266,13 +2314,13 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_resize_op = cast<TFL::ResizeNearestNeighborOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_resize_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto result = convertResizeOp(rewriter, op, output_type,
-                                tfl_resize_op.input(), StringRef("NEAREST"));
+  llvm::Optional<Value> result = convertResizeOp(
+      rewriter, op, output_type, tfl_resize_op.input(), StringRef("NEAREST"));
 
   if (!result) return failure();
 
@@ -2285,7 +2333,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_sel_op = cast<TFL::SelectOp>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertSelectOp(rewriter, op, tfl_sel_op.getResult(),
                       tfl_sel_op.condition(), tfl_sel_op.x(), tfl_sel_op.y());
   if (!result) return failure();
@@ -2299,7 +2347,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_sel_op = cast<TFL::SelectV2Op>(op);
 
-  auto result =
+  llvm::Optional<Value> result =
       convertSelectOp(rewriter, op, tfl_sel_op.getResult(),
                       tfl_sel_op.condition(), tfl_sel_op.x(), tfl_sel_op.y());
   if (!result) return failure();
@@ -2312,7 +2360,7 @@
 LogicalResult ConvertTFLSpaceToBatchNdOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_s2b_op = cast<TFL::SpaceToBatchNdOp>(op);
-  auto result = convertSpaceToBatchNDOp(
+  llvm::Optional<Value> result = convertSpaceToBatchNDOp(
       rewriter, op, tfl_s2b_op.getResult(), tfl_s2b_op.input(),
       tfl_s2b_op.block_shape(), tfl_s2b_op.paddings());
 
@@ -2327,7 +2375,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_b2s_op = cast<TFL::BatchToSpaceNdOp>(op);
 
-  auto result = convertBatchToSpaceNDOp(
+  llvm::Optional<Value> result = convertBatchToSpaceNDOp(
       rewriter, op, tfl_b2s_op.getResult(), tfl_b2s_op.input(),
       tfl_b2s_op.block_shape(), tfl_b2s_op.indices());
 
@@ -2343,9 +2391,9 @@
   auto tfl_s2d_op = cast<TFL::SpaceToDepthOp>(op);
 
   auto block_size_attr = tfl_s2d_op.block_sizeAttr();
-  auto result = convertSpaceToDepthOp(rewriter, op, tfl_s2d_op.getResult(),
-                                      tfl_s2d_op.input(), block_size_attr,
-                                      rewriter.getStringAttr("NHWC"));
+  llvm::Optional<Value> result = convertSpaceToDepthOp(
+      rewriter, op, tfl_s2d_op.getResult(), tfl_s2d_op.input(), block_size_attr,
+      rewriter.getStringAttr("NHWC"));
 
   if (!result) return failure();
 
@@ -2359,9 +2407,9 @@
   auto tfl_d2s_op = cast<TFL::DepthToSpaceOp>(op);
 
   auto block_size_attr = tfl_d2s_op.block_sizeAttr();
-  auto result = convertDepthToSpaceOp(rewriter, op, tfl_d2s_op.getResult(),
-                                      tfl_d2s_op.input(), block_size_attr,
-                                      rewriter.getStringAttr("NHWC"));
+  llvm::Optional<Value> result = convertDepthToSpaceOp(
+      rewriter, op, tfl_d2s_op.getResult(), tfl_d2s_op.input(), block_size_attr,
+      rewriter.getStringAttr("NHWC"));
 
   if (!result) return failure();
 
@@ -2374,7 +2422,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_ss_op = cast<TFL::StridedSliceOp>(op);
 
-  auto result = convertStridedSliceOp(
+  llvm::Optional<Value> result = convertStridedSliceOp(
       rewriter, op, tfl_ss_op.getResult(), tfl_ss_op.input(), tfl_ss_op.begin(),
       tfl_ss_op.end(), tfl_ss_op.strides(), tfl_ss_op.begin_maskAttr().getInt(),
       tfl_ss_op.end_maskAttr().getInt(), tfl_ss_op.ellipsis_maskAttr().getInt(),
@@ -2391,8 +2439,8 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_zeroslike_op = cast<TFL::ZerosLikeOp>(op);
 
-  auto result = convertZerosLikeOp(rewriter, op, tfl_zeroslike_op.getResult(),
-                                   tfl_zeroslike_op.input());
+  llvm::Optional<Value> result = convertZerosLikeOp(
+      rewriter, op, tfl_zeroslike_op.getResult(), tfl_zeroslike_op.input());
 
   if (!result) return failure();
 
@@ -2404,12 +2452,12 @@
 LogicalResult ConvertTFLHardSwishOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_hardswish_op = cast<TFL::HardSwishOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tfl_hardswish_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto input_type =
+  RankedTensorType input_type =
       tfl_hardswish_op.input().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!input_type) return failure();
@@ -2431,12 +2479,16 @@
         output_type.getElementType()
             .dyn_cast_or_null<mlir::quant::UniformQuantizedType>();
 
-    auto int16_element_qtype = mlir::quant::UniformQuantizedType::get(
-        true, rewriter.getIntegerType(16), rewriter.getF32Type(), 1.0f, 0,
-        -32768, 32767);
-    auto bool_type = RankedTensorType::get(input_shape, rewriter.getI1Type());
-    auto int16_type = RankedTensorType::get(input_shape, int16_element_qtype);
-    auto int32_type = RankedTensorType::get(input_shape, rewriter.getI32Type());
+    UniformQuantizedType int16_element_qtype =
+        mlir::quant::UniformQuantizedType::get(
+            true, rewriter.getIntegerType(16), rewriter.getF32Type(), 1.0f, 0,
+            -32768, 32767);
+    RankedTensorType bool_type =
+        RankedTensorType::get(input_shape, rewriter.getI1Type());
+    RankedTensorType int16_type =
+        RankedTensorType::get(input_shape, int16_element_qtype);
+    RankedTensorType int32_type =
+        RankedTensorType::get(input_shape, rewriter.getI32Type());
 
     // Table's real input range [-4.0, 4.0].
     // Use TABLE op to get relu6(x+3) / 6
@@ -2449,10 +2501,10 @@
       return std::lround(32768.0 * v);
     };
 
-    auto table_const = getTosa1DConstTensorTable(rewriter, op, hardswish_func);
+    Value table_const = getTosa1DConstTensorTable(rewriter, op, hardswish_func);
 
     // Rescale input to 9.7
-    auto op1_rescale_in =
+    Value op1_rescale_in =
         buildRescale(rewriter, op, int16_type, tfl_hardswish_op.input(),
                      (in_quant_type.getScale() * 128.0) / input_sample_grain,
                      in_quant_type.getZeroPoint(), 0);
@@ -2462,13 +2514,13 @@
         op->getLoc(), int32_type, op1_rescale_in, table_const);
 
     // scale table output back to quantized space
-    auto op3_rescale_op2 =
+    Value op3_rescale_op2 =
         buildRescale(rewriter, op, output_type, op2_table_op1.getResult(),
                      1.0 / (128.0 * 32768.0 * out_quant_type.getScale()), 0,
                      out_quant_type.getZeroPoint());
 
-    auto op4_rescale_in = buildRescale(rewriter, op, int32_type,
-                                       tfl_hardswish_op.input(), 1.0, 0, 0);
+    Value op4_rescale_in = buildRescale(rewriter, op, int32_type,
+                                        tfl_hardswish_op.input(), 1.0, 0, 0);
 
     // Get 3.0 in quantized space
     int32_t quantized_3 =
@@ -2495,7 +2547,7 @@
     // op5 = reciprocal(6)
     // op6 = mul (op4, op5)
 
-    auto op1_value = getTosaConstTensorSingleF32(rewriter, op, 3.0);
+    Value op1_value = getTosaConstTensorSingleF32(rewriter, op, 3.0);
 
     auto op2_add_x_op1 = rewriter.create<tosa::AddOp>(
         op->getLoc(), output_type, tfl_hardswish_op.input(), op1_value);
@@ -2526,9 +2578,10 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_logistic_op = cast<TFL::LogisticOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_logistic_op.getResult().getType().dyn_cast<RankedTensorType>();
-  auto input_type = tfl_logistic_op.x().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tfl_logistic_op.x().getType().dyn_cast<RankedTensorType>();
   if (!input_type || !output_type) return failure();
 
   bool input_is_qtype =
@@ -2543,13 +2596,14 @@
   }
 
   if (input_is_qtype) {
-    auto int16_element_qtype = mlir::quant::UniformQuantizedType::get(
-        true, rewriter.getIntegerType(16), rewriter.getF32Type(), 1.0f, 0,
-        -32768, 32767);
-    auto int16_type =
+    UniformQuantizedType int16_element_qtype =
+        mlir::quant::UniformQuantizedType::get(
+            true, rewriter.getIntegerType(16), rewriter.getF32Type(), 1.0f, 0,
+            -32768, 32767);
+    RankedTensorType int16_type =
         RankedTensorType::get(output_type.getShape(), int16_element_qtype);
-    auto int32_type = RankedTensorType::get(output_type.getShape(),
-                                            rewriter.getIntegerType(32));
+    RankedTensorType int32_type = RankedTensorType::get(
+        output_type.getShape(), rewriter.getIntegerType(32));
     mlir::quant::UniformQuantizedType input_qtype =
         input_type.getElementType()
             .dyn_cast_or_null<mlir::quant::UniformQuantizedType>();
@@ -2565,10 +2619,10 @@
       return std::lround(32768.0 * v);
     };
 
-    auto table_const = getTosa1DConstTensorTable(rewriter, op, sigmoid_func);
+    Value table_const = getTosa1DConstTensorTable(rewriter, op, sigmoid_func);
 
     // Rescale input to 9.7 precision.
-    auto op1_rescale_in =
+    Value op1_rescale_in =
         buildRescale(rewriter, op, int16_type, tfl_logistic_op.x(),
                      (input_qtype.getScale() * 128.0) / input_sample_grain,
                      input_qtype.getZeroPoint(), 0);
@@ -2579,7 +2633,7 @@
     double output_rescale_scale =
         1.0 / (output_qtype.getScale() * 32768.0 * 128.0);
 
-    auto op3_rescale_op2 =
+    Value op3_rescale_op2 =
         buildRescale(rewriter, op, output_type, op2_table_op1.getResult(),
                      output_rescale_scale, 0, output_qtype.getZeroPoint());
 
@@ -2595,9 +2649,10 @@
 LogicalResult ConvertTFLTanhOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_tanh_op = cast<TFL::TanhOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tfl_tanh_op.getResult().getType().dyn_cast<RankedTensorType>();
-  auto input_type = tfl_tanh_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType input_type =
+      tfl_tanh_op.input().getType().dyn_cast<RankedTensorType>();
   if (!input_type || !output_type) return failure();
 
   bool input_is_qtype =
@@ -2612,13 +2667,14 @@
   }
 
   if (input_is_qtype) {
-    auto int16_element_qtype = mlir::quant::UniformQuantizedType::get(
-        true, rewriter.getIntegerType(16), rewriter.getF32Type(), 1.0f, 0,
-        -32768, 32767);
-    auto int16_type =
+    UniformQuantizedType int16_element_qtype =
+        mlir::quant::UniformQuantizedType::get(
+            true, rewriter.getIntegerType(16), rewriter.getF32Type(), 1.0f, 0,
+            -32768, 32767);
+    RankedTensorType int16_type =
         RankedTensorType::get(output_type.getShape(), int16_element_qtype);
-    auto int32_type = RankedTensorType::get(output_type.getShape(),
-                                            rewriter.getIntegerType(32));
+    RankedTensorType int32_type = RankedTensorType::get(
+        output_type.getShape(), rewriter.getIntegerType(32));
     mlir::quant::UniformQuantizedType input_qtype =
         input_type.getElementType()
             .dyn_cast_or_null<mlir::quant::UniformQuantizedType>();
@@ -2635,10 +2691,10 @@
       return std::lround(32768.0 * v);
     };
 
-    auto table_const = getTosa1DConstTensorTable(rewriter, op, tanh_func);
+    Value table_const = getTosa1DConstTensorTable(rewriter, op, tanh_func);
 
     // Rescale input to 9.7 precision.
-    auto op1_rescale_in =
+    Value op1_rescale_in =
         buildRescale(rewriter, op, int16_type, tfl_tanh_op.input(),
                      (input_qtype.getScale() * 128.0) / input_sample_grain,
                      input_qtype.getZeroPoint(), 0);
@@ -2649,7 +2705,7 @@
     double output_rescale_scale =
         1.0 / (output_qtype.getScale() * 32768.0 * 128.0);
 
-    auto op3_rescale_op2 =
+    Value op3_rescale_op2 =
         buildRescale(rewriter, op, output_type, op2_table_op1.getResult(),
                      output_rescale_scale, 0, output_qtype.getZeroPoint());
 
@@ -2665,7 +2721,7 @@
 LogicalResult ConvertTFLPReluOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_prelu_op = cast<TFL::PReluOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tfl_prelu_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -2677,7 +2733,7 @@
 LogicalResult ConvertTFLLeakyReluOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_leakyrelu_op = cast<TFL::LeakyReluOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tfl_leakyrelu_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -2689,7 +2745,7 @@
 LogicalResult ConvertTFLNegOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_neg_op = cast<TFL::NegOp>(op);
-  auto output_type =
+  RankedTensorType output_type =
       tfl_neg_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!output_type) return failure();
 
@@ -2719,9 +2775,9 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_reverse_op = cast<TFL::ReverseV2Op>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tfl_reverse_op.input().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_reverse_op.getResult().getType().dyn_cast<RankedTensorType>();
   if (!input_type || !output_type) return failure();
 
@@ -2756,22 +2812,22 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_quantize_op = cast<TFL::QuantizeOp>(op);
 
-  auto input_type =
+  RankedTensorType input_type =
       tfl_quantize_op.input().getType().dyn_cast<RankedTensorType>();
-  auto output_type =
+  RankedTensorType output_type =
       tfl_quantize_op.getResult().getType().dyn_cast<RankedTensorType>();
 
   if (!input_type || !output_type) return failure();
 
-  auto qtype =
+  RankedTensorType qtype =
       tfl_quantize_op.qtypeAttr().getValue().dyn_cast<RankedTensorType>();
   if (!qtype) return failure();
 
-  auto element_type =
+  UniformQuantizedType element_type =
       qtype.getElementType().dyn_cast<mlir::quant::UniformQuantizedType>();
   if (!element_type) return failure();
 
-  auto input_element_type =
+  UniformQuantizedType input_element_type =
       input_type.getElementType().dyn_cast<mlir::quant::UniformQuantizedType>();
 
   // If input is already a quantized type, this is basically a RESCALE (or
@@ -2779,7 +2835,7 @@
   if (input_element_type) {
     double rescale_scale =
         input_element_type.getScale() / element_type.getScale();
-    auto rescale_op = buildRescale(
+    Value rescale_op = buildRescale(
         rewriter, op, output_type, tfl_quantize_op.input(), rescale_scale,
         input_element_type.getZeroPoint(), element_type.getZeroPoint());
 
@@ -2791,8 +2847,8 @@
     int64_t num_bits = element_type.getStorageTypeIntegralWidth();
     zp = element_type.isSigned() ? zp : zp - (1 << (num_bits - 1));
 
-    auto result = convertQuantizeOp(rewriter, op, output_type,
-                                    tfl_quantize_op.input(), scale, zp);
+    llvm::Optional<Value> result = convertQuantizeOp(
+        rewriter, op, output_type, tfl_quantize_op.input(), scale, zp);
 
     if (!result) return failure();
 
@@ -2806,15 +2862,16 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_dequantize_op = cast<TFL::DequantizeOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_dequantize_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();
 
-  auto qtype = tfl_dequantize_op.input().getType().dyn_cast<RankedTensorType>();
+  RankedTensorType qtype =
+      tfl_dequantize_op.input().getType().dyn_cast<RankedTensorType>();
   if (!qtype) return failure();
 
-  auto element_type =
+  UniformQuantizedType element_type =
       qtype.getElementType().dyn_cast<mlir::quant::UniformQuantizedType>();
   if (!element_type) return failure();
 
@@ -2823,8 +2880,8 @@
   int64_t num_bits = element_type.getStorageTypeIntegralWidth();
   zp = element_type.isSigned() ? zp : zp - (1 << (num_bits - 1));
 
-  auto result = convertDequantizeOp(rewriter, op, output_type,
-                                    tfl_dequantize_op.input(), scale, zp);
+  llvm::Optional<Value> result = convertDequantizeOp(
+      rewriter, op, output_type, tfl_dequantize_op.input(), scale, zp);
 
   if (!result) return failure();
 
@@ -2837,7 +2894,7 @@
     Operation* op, PatternRewriter& rewriter) const {
   auto tfl_qconst_op = cast<TFL::QConstOp>(op);
 
-  auto output_type =
+  RankedTensorType output_type =
       tfl_qconst_op.getResult().getType().dyn_cast<RankedTensorType>();
   // Not a ranked tensor output
   if (!output_type) return failure();