Remove support for different input and output quantization
diff --git a/tensorflow/lite/micro/kernels/reduce.cc b/tensorflow/lite/micro/kernels/reduce.cc
index 3ecfa7a..3ad1068 100644
--- a/tensorflow/lite/micro/kernels/reduce.cc
+++ b/tensorflow/lite/micro/kernels/reduce.cc
@@ -291,6 +291,9 @@
}));
break;
case kTfLiteInt8:
+ TF_LITE_ENSURE_EQ(context, static_cast<double>(op_data->input_scale),
+ static_cast<double>(op_data->output_scale));
+ TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp);
TF_LITE_ENSURE(
context,
reference_ops::ReduceGeneric<int8_t>(
@@ -303,18 +306,6 @@
[](const int8_t current, const int8_t in) -> int8_t {
return (in > current) ? in : current;
}));
-
- // Convert between different output scales
- if (op_data->input_scale != op_data->output_scale) {
- int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
- for (int i = 0; i < op_data->num_output_elements; i++) {
- output_data[i] = static_cast<int8_t>(std::max(
- std::min(MultiplyByQuantizedMultiplier(
- output_data[i], op_data->multiplier, op_data->shift),
- static_cast<int>(std::numeric_limits<int8_t>::max())),
- static_cast<int>(std::numeric_limits<int8_t>::min())));
- }
- }
break;
default:
TF_LITE_KERNEL_LOG(context,
diff --git a/tensorflow/lite/micro/kernels/reduce_test.cc b/tensorflow/lite/micro/kernels/reduce_test.cc
index dd5fd1b..8a649d7 100644
--- a/tensorflow/lite/micro/kernels/reduce_test.cc
+++ b/tensorflow/lite/micro/kernels/reduce_test.cc
@@ -397,32 +397,6 @@
tflite::ops::micro::Register_REDUCE_MAX(), &params);
}
-TF_LITE_MICRO_TEST(Int8MaxOpTestKeepDimsDifferentScale) {
- const int input_shape[] = {3, 1, 3, 2};
- const float input_data[] = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
- const int axis_shape[] = {1, 1};
- const int axis_data[] = {1, 1};
- const int output_shape[] = {1, 2};
- const float expected_output_data[] = {0.5, 0.6};
-
- float input_scale = 2 / 255.0;
- int input_zp = 0;
- float output_scale = 3 / 255.0;
- int output_zp = 0;
-
- TfLiteReducerParams params = {true};
-
- int8_t input_data_quant[6];
- int8_t output_data_quant[2];
- int8_t expected_output_data_quant[2];
-
- tflite::testing::TestReduceOpQuantized<int8_t>(
- input_shape, input_data, input_data_quant, input_scale, input_zp,
- axis_shape, axis_data, output_shape, expected_output_data,
- output_data_quant, expected_output_data_quant, output_scale, output_zp,
-      tflite::ops::micro::Register_REDUCE_MAX(), &params);
-}
-
TF_LITE_MICRO_TEST(Int8MaxOpTestWithoutKeepDims) {
const int input_shape[] = {3, 1, 3, 2};
const float input_data[] = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
@@ -449,32 +423,6 @@
tflite::ops::micro::Register_REDUCE_MAX(), ¶ms);
}
-TF_LITE_MICRO_TEST(Int8MaxOpTestWithoutKeepDimsDifferentScale) {
- const int input_shape[] = {3, 1, 3, 2};
- const float input_data[] = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
- const int axis_shape[] = {1, 1};
- const int axis_data[] = {1, 1};
- const int output_shape[] = {1, 2};
- const float expected_output_data[] = {0.5, 0.6};
-
- float input_scale = 2 / 255.0;
- int input_zp = 0;
- float output_scale = 3 / 255.0;
- int output_zp = 0;
-
- TfLiteReducerParams params = {false};
-
- int8_t input_data_quant[6];
- int8_t output_data_quant[2];
- int8_t expected_output_data_quant[2];
-
- tflite::testing::TestReduceOpQuantized<int8_t>(
- input_shape, input_data, input_data_quant, input_scale, input_zp,
- axis_shape, axis_data, output_shape, expected_output_data,
- output_data_quant, expected_output_data_quant, output_scale, output_zp,
-      tflite::ops::micro::Register_REDUCE_MAX(), &params);
-}
-
TF_LITE_MICRO_TEST(MeanInt84DWithoutKeepDimsWithPrecision) {
const int kInputShape4D[] = {4, 2, 2, 3, 1};
const float kInputData4D[] = {1.0, 24.0, 13.0, 3.0, 9.0, 17.0,