Change the per_channel_shift parameter of tflite::PopulateConvolutionQuantizationParams() to be int32_t* instead of int*.
This brings it into line with TfLite Micro, which allocates int32_t storage for per_channel_shift parameters, and removes the need for reinterpret_cast at the micro kernel call sites. See https://github.com/tensorflow/tflite-micro/issues/164.
PiperOrigin-RevId: 379185579
Change-Id: I349b3c7a7c7b29d768f649b3ab86b657d73402f6
diff --git a/tensorflow/lite/kernels/kernel_util.cc b/tensorflow/lite/kernels/kernel_util.cc
index accea8f..f7ce049 100644
--- a/tensorflow/lite/kernels/kernel_util.cc
+++ b/tensorflow/lite/kernels/kernel_util.cc
@@ -197,7 +197,7 @@
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
- int32_t* per_channel_multiplier, int* per_channel_shift) {
+ int32_t* per_channel_multiplier, int32_t* per_channel_shift) {
const auto* affine_quantization =
reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
return PopulateConvolutionQuantizationParams(
@@ -212,7 +212,8 @@
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
- int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels) {
+ int32_t* per_channel_multiplier, int32_t* per_channel_shift,
+ int num_channels) {
TF_LITE_ENSURE_EQ(context, input->quantization.type,
kTfLiteAffineQuantization);
TF_LITE_ENSURE_EQ(context, filter->quantization.type,
diff --git a/tensorflow/lite/kernels/kernel_util.h b/tensorflow/lite/kernels/kernel_util.h
index 9441842..ffae070 100644
--- a/tensorflow/lite/kernels/kernel_util.h
+++ b/tensorflow/lite/kernels/kernel_util.h
@@ -214,14 +214,15 @@
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
- int32_t* per_channel_multiplier, int* per_channel_shift);
+ int32_t* per_channel_multiplier, int32_t* per_channel_shift);
TfLiteStatus PopulateConvolutionQuantizationParams(
TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
- int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels);
+ int32_t* per_channel_multiplier, int32_t* per_channel_shift,
+ int num_channels);
// Calculates the multiplication factor for a quantized convolution (or
// quantized depthwise convolution) involving the given tensors. Returns an
diff --git a/tensorflow/lite/kernels/kernel_util_test.cc b/tensorflow/lite/kernels/kernel_util_test.cc
index 643bfcd..3276f21 100644
--- a/tensorflow/lite/kernels/kernel_util_test.cc
+++ b/tensorflow/lite/kernels/kernel_util_test.cc
@@ -353,7 +353,7 @@
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
- std::vector<int> per_channel_shift(3);
+ std::vector<int32_t> per_channel_shift(3);
// Call and verify results for per channel case.
EXPECT_EQ(
diff --git a/tensorflow/lite/micro/kernels/conv_common.cc b/tensorflow/lite/micro/kernels/conv_common.cc
index a4a36ae..3d94760 100644
--- a/tensorflow/lite/micro/kernels/conv_common.cc
+++ b/tensorflow/lite/micro/kernels/conv_common.cc
@@ -111,8 +111,7 @@
context, input, filter, bias, output, params.activation,
&data->output_multiplier, &data->output_shift,
&data->output_activation_min, &data->output_activation_max,
- data->per_channel_output_multiplier,
- reinterpret_cast<int*>(data->per_channel_output_shift),
+ data->per_channel_output_multiplier, data->per_channel_output_shift,
output_channels));
}
diff --git a/tensorflow/lite/micro/kernels/depthwise_conv_common.cc b/tensorflow/lite/micro/kernels/depthwise_conv_common.cc
index 6e6693a..6a00dc4 100644
--- a/tensorflow/lite/micro/kernels/depthwise_conv_common.cc
+++ b/tensorflow/lite/micro/kernels/depthwise_conv_common.cc
@@ -113,8 +113,7 @@
context, input, filter, bias, output, params.activation,
&data->output_multiplier, &data->output_shift,
&data->output_activation_min, &data->output_activation_max,
- data->per_channel_output_multiplier,
- reinterpret_cast<int*>(data->per_channel_output_shift),
+ data->per_channel_output_multiplier, data->per_channel_output_shift,
output_channels));
}
diff --git a/tensorflow/lite/micro/kernels/transpose_conv.cc b/tensorflow/lite/micro/kernels/transpose_conv.cc
index c49a998..da926fd 100644
--- a/tensorflow/lite/micro/kernels/transpose_conv.cc
+++ b/tensorflow/lite/micro/kernels/transpose_conv.cc
@@ -103,8 +103,7 @@
&data->params.output_multiplier, &data->params.output_shift,
&data->params.quantized_activation_min,
&data->params.quantized_activation_max,
- data->per_channel_output_multiplier,
- reinterpret_cast<int*>(data->per_channel_output_shift),
+ data->per_channel_output_multiplier, data->per_channel_output_shift,
output_channels));
}
return kTfLiteOk;