Fix for TFLite quantized mean operator: drop the manual +0.5f rounding bias, since TfLiteRound already performs rounding.
PiperOrigin-RevId: 337326316
Change-Id: I04a50cfff4a3e0a90c4e36d604cd442f34a8d988
diff --git a/tensorflow/lite/kernels/internal/reference/reduce.h b/tensorflow/lite/kernels/internal/reference/reduce.h
index d57b6f2..a7c86dd 100644
--- a/tensorflow/lite/kernels/internal/reference/reduce.h
+++ b/tensorflow/lite/kernels/internal/reference/reduce.h
@@ -381,8 +381,7 @@
const float scale = input_scale / output_scale;
if (compute_sum) {
// TODO(b/116341117): Eliminate float and do this completely in 8bit.
- const float bias =
- -input_zero_point * scale * num_elements_in_axis + 0.5f;
+ const float bias = -input_zero_point * scale * num_elements_in_axis;
for (size_t idx = 0; idx < num_outputs; ++idx) {
const U value =
static_cast<U>(TfLiteRound(temp_sum[idx] * scale + bias)) +
@@ -390,7 +389,7 @@
output_data[idx] = static_cast<T>(value);
}
} else {
- const float bias = -input_zero_point * scale + 0.5f;
+ const float bias = -input_zero_point * scale;
for (size_t idx = 0; idx < num_outputs; ++idx) {
float float_mean = static_cast<float>(temp_sum[idx]) /
static_cast<float>(num_elements_in_axis);