Move xtensa_hifimini/fixedpoint_utils to the flat tflite namespace.
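
The helpers previously lived in tflite::ops::micro::xtensa::hifimini and now sit
directly in tflite, so kernels that are already inside namespace tflite can call
them unqualified. The int32_t overload is also renamed to
MultiplyByQuantizedMultiplierResult48Bit; the ae_p24x2s overload keeps the
shorter MultiplyByQuantizedMultiplier name, which is why most call sites in the
diff below stay on the original name. A minimal, hypothetical sketch of the
flattening follows; the types and bodies are stand-ins so it compiles
off-target, not the real Xtensa ae_q56s/ae_p24x2s fixed-point code:

    #include <cstdint>

    // Before: helpers in a deeply nested namespace, so every call site needed
    // the full ops::micro::xtensa::hifimini:: qualification.
    namespace tflite {
    namespace ops {
    namespace micro {
    namespace xtensa {
    namespace hifimini {
    inline int64_t MultiplyByQuantizedMultiplier(int32_t x,
                                                 int32_t quantized_multiplier,
                                                 int shift) {
      // Placeholder body; the real helper keeps the result at 48-bit
      // alignment in a QR register.
      (void)shift;
      return static_cast<int64_t>(x) * quantized_multiplier;
    }
    }  // namespace hifimini
    }  // namespace xtensa
    }  // namespace micro
    }  // namespace ops
    }  // namespace tflite

    // After: the same helper sits directly in namespace tflite, and the
    // int32_t overload carries the Result48Bit suffix.
    namespace tflite {
    inline int64_t MultiplyByQuantizedMultiplierResult48Bit(
        int32_t x, int32_t quantized_multiplier, int shift) {
      // Placeholder body, as above.
      (void)shift;
      return static_cast<int64_t>(x) * quantized_multiplier;
    }

    // Kernels already live inside namespace tflite, so after the move they
    // call the helper unqualified:
    inline int64_t ExampleKernelStep(int32_t acc) {
      return MultiplyByQuantizedMultiplierResult48Bit(acc, /*multiplier=*/3,
                                                      /*shift=*/1);
    }
    }  // namespace tflite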

PiperOrigin-RevId: 336551368
Change-Id: Ie299b0b1a5570558242decb58beb87859764a1f8
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
index 28bf205..2c3577d 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
@@ -172,7 +172,7 @@
           // Apply quantized multiplier and accumulate result at 48bit
           // alignment. Convert the (unsigned) 32-bit multiplier down to a
           // 24-bit multiplier.
-          acc_56 = ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
+          acc_56 = MultiplyByQuantizedMultiplier(
               acc_24x2, output_multiplier[out_channel] >> 8,
               output_shift[out_channel]);
 
@@ -249,8 +249,8 @@
 
     // Apply quantized multiplier and accumulate result at 48bit alignment.
     // Convert the (unsigned) 32-bit multiplier down to a 24-bit multiplier.
-    acc_56 = ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
-        acc_24x2, output_multiplier[ch] >> 8, output_shift[ch]);
+    acc_56 = MultiplyByQuantizedMultiplier(acc_24x2, output_multiplier[ch] >> 8,
+                                           output_shift[ch]);
 
     // Add output offset, cap activation, and assign to the output:
     acc_56 = AE_ADDQ56(acc_56, output_offset_56);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
index 73da39c..4a37bec 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
@@ -172,10 +172,9 @@
 
             // Apply quantized multiplier and accumulate result at 48bit
             // alignment:
-            acc_56 =
-                ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
-                    acc_24x2, output_multiplier[output_channel],
-                    output_shift[output_channel]);
+            acc_56 = MultiplyByQuantizedMultiplier(
+                acc_24x2, output_multiplier[output_channel],
+                output_shift[output_channel]);
 
             // Add output offset, cap activation, and assign to the output:
             acc_56 = AE_ADDQ56(acc_56, output_offset_56);
@@ -288,12 +287,10 @@
 
     // Apply quantized multiplier and accumulate result at 48bit
     // alignment:
-    block_0_acc = ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
-        acc_24x2_0, mult, shift);
+    block_0_acc = MultiplyByQuantizedMultiplier(acc_24x2_0, mult, shift);
     // Apply quantized multiplier and accumulate result at 48bit
     // alignment:
-    block_1_acc = ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
-        acc_24x2_1, mult, shift);
+    block_1_acc = MultiplyByQuantizedMultiplier(acc_24x2_1, mult, shift);
 
     // Add output offset, cap activation, and assign to the output:
     block_0_acc = AE_ADDQ56(block_0_acc, output_offset_56);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h b/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h
index 918192c..74bd7e2 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h
@@ -25,10 +25,6 @@
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace xtensa {
-namespace hifimini {
 
 // INT24 MIN/MAX
 #define INT24_MIN -8388608
@@ -77,13 +73,10 @@
   return result_56;
 }
 
-//
 // Multiply 32bit value by a quantized multiplier (w/ shift) and returns a 48bit
 // aligned value in the QR register.
-//
-inline ae_q56s MultiplyByQuantizedMultiplier(int32_t x,
-                                             int32_t quantized_multiplier,
-                                             int shift) {
+inline ae_q56s MultiplyByQuantizedMultiplierResult48Bit(
+    int32_t x, int32_t quantized_multiplier, int shift) {
   // Convert x into a 2x24bit PR register file. If x is outside the numerical
   // limits of a 24bit integer, the "fractional" or lower 8bits are discarded.
   // If x is within the range of a 24 bit integer, the "signed" or upper 8bits
@@ -144,10 +137,6 @@
   return static_cast<int>(raw);
 }
 
-}  // namespace hifimini
-}  // namespace xtensa
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite
 
 #endif  // TENSORFLOW_LITE_MICRO_KERNELS_XTENSA_HIFIMINI_FIXEDPOINT_UTILS_H_
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
index 40ab04f..06e16d3 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
@@ -124,8 +124,8 @@
       ae_p24x2s sum_24x2 = AE_TRUNCP24Q48(sum_56);
 
       // MultiplyByQuantizedMultiplier returns a 48bit aligned value
-      sum_56 = ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
-          sum_24x2, output_multiplier, output_shift);
+      sum_56 = MultiplyByQuantizedMultiplier(sum_24x2, output_multiplier,
+                                             output_shift);
 
       // Add output_offset and cap min/max values:
       sum_56 = AE_ADDQ56(sum_56, output_offset_56);
@@ -147,8 +147,8 @@
   double real_multiplier = 0.0;
   TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
       context, input, filter, bias, output, &real_multiplier));
-  ops::micro::xtensa::hifimini::QuantizeMultiplier(
-      real_multiplier, &data->output_multiplier, &data->output_shift);
+  QuantizeMultiplier(real_multiplier, &data->output_multiplier,
+                     &data->output_shift);
   return CalculateActivationRangeQuantized(context, activation, output,
                                            &data->output_activation_min,
                                            &data->output_activation_max);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
index 0574e8e..b867e70 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
@@ -113,8 +113,7 @@
 
   // TODO(b/155682734): Fix dangerous input/output scale ratio assumptions.
   op_data->scale_multiplier =
-      ops::micro::xtensa::hifimini::CreateQConstantForInt24(
-          0, input->params.scale / output->params.scale);
+      CreateQConstantForInt24(0, input->params.scale / output->params.scale);
 
   op_data->zero_point = output->params.zero_point;
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
index 98a5e93..1b8a1ba 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
@@ -150,10 +150,8 @@
         dot_prod_56 = AE_Q56S_SLAI(dot_prod_56, 24);
         ae_p24x2s dot_prod_24x2 = AE_TRUNCP24Q48(dot_prod_56);
 
-        dot_prod_56 =
-            tflite::ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
-                dot_prod_24x2, data.effective_scale_1_a,
-                data.effective_scale_1_b);
+        dot_prod_56 = MultiplyByQuantizedMultiplier(
+            dot_prod_24x2, data.effective_scale_1_a, data.effective_scale_1_b);
 
         // Cap min/max and convert to int32_t:
         dot_prod_56 = AE_MAXQ56S(dot_prod_56, output_int16_min_56);
@@ -244,10 +242,9 @@
     ae_q56s output_int8_min_56 = AE_CVTQ48A32S(INT8_MIN);
     ae_q56s output_zp_56 = AE_CVTQ48A32S(data.output_zero_point);
     for (int i = 0; i < n_batch * n_unit; ++i) {
-      ae_q56s x_56 =
-          tflite::ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
-              scratch_output_tensor[i], data.effective_scale_2_a,
-              data.effective_scale_2_b);
+      ae_q56s x_56 = MultiplyByQuantizedMultiplierResult48Bit(
+          scratch_output_tensor[i], data.effective_scale_2_a,
+          data.effective_scale_2_b);
       // Add output adjustment:
       x_56 = AE_ADDQ56(x_56, output_zp_56);
       // Cap min/max and convert to int32_t (already aligned to 32bit):
@@ -360,12 +357,10 @@
   TFLITE_DCHECK(node->user_data != nullptr);
   OpData* data = static_cast<OpData*>(node->user_data);
 
-  ops::micro::xtensa::hifimini::QuantizeMultiplier(effective_scale_1,
-                                                   &data->effective_scale_1_a,
-                                                   &data->effective_scale_1_b);
-  ops::micro::xtensa::hifimini::QuantizeMultiplier(effective_scale_2,
-                                                   &data->effective_scale_2_a,
-                                                   &data->effective_scale_2_b);
+  QuantizeMultiplier(effective_scale_1, &data->effective_scale_1_a,
+                     &data->effective_scale_1_b);
+  QuantizeMultiplier(effective_scale_2, &data->effective_scale_2_a,
+                     &data->effective_scale_2_b);
 
   data->input_zero_point = input->params.zero_point;
   data->output_zero_point = output->params.zero_point;