| /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. |
| |
| Licensed under the Apache License, Version 2.0 (the "License"); |
| you may not use this file except in compliance with the License. |
| You may obtain a copy of the License at |
| |
| http://www.apache.org/licenses/LICENSE-2.0 |
| |
| Unless required by applicable law or agreed to in writing, software |
| distributed under the License is distributed on an "AS IS" BASIS, |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| See the License for the specific language governing permissions and |
| limitations under the License. |
| ==============================================================================*/ |
| |
| #include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" |
| |
| #include "tensorflow/lite/c/builtin_op_data.h" |
| #include "tensorflow/lite/c/common.h" |
| #include "tensorflow/lite/kernels/internal/common.h" |
| #include "tensorflow/lite/kernels/internal/quantization_util.h" |
| #include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" |
| #include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" |
| #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" |
| #include "tensorflow/lite/kernels/kernel_util.h" |
| #include "tensorflow/lite/kernels/padding.h" |
| |
| namespace tflite { |
| namespace ops { |
| namespace micro { |
| namespace depthwise_conv { |
| namespace { |
| |
| constexpr int kInputTensor = 0; |
| constexpr int kFilterTensor = 1; |
| constexpr int kBiasTensor = 2; |
| constexpr int kOutputTensor = 0; |
| |
| // Depthwise conv is quantized along dimension 3: |
| // https://www.tensorflow.org/lite/performance/quantization_spec |
| constexpr int kDepthwiseConvQuantizedDimension = 3; |
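// For a depthwise filter of shape [1, filter_height, filter_width,
// output_channels], dimension 3 is the output-channel axis, so a filter
// with eight output channels carries eight per-channel scales.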
| |
| struct OpData { |
| TfLitePaddingValues padding; |
| // The scaling factor from input to output (aka the 'real multiplier') can |
| // be represented as a fixed point multiplier plus a left shift. |
| int32_t output_multiplier; |
| int output_shift; |
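  // For example, a real multiplier of 0.75 is stored by QuantizeMultiplier
  // (see quantization_util.h) as round(0.75 * 2^31) with a shift of 0, since
  // the mantissa is kept in [0.5, 1).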
| |
| // Per channel output multiplier and shift. |
| int32_t* per_channel_output_multiplier; |
| int32_t* per_channel_output_shift; |
  // The range of the fused activation layer. For example, for kTfLiteActNone
  // and uint8_t these would be 0 and 255.
| int32_t output_activation_min; |
| int32_t output_activation_max; |
| }; |
| |
| TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, |
| TfLiteDepthwiseConvParams* params, int width, |
| int height, int filter_width, int filter_height, |
| const TfLiteType data_type, OpData* data) { |
| bool has_bias = node->inputs->size == 3; |
| // Check number of inputs/outputs |
| TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); |
| TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); |
| |
| int unused_output_height, unused_output_width; |
| data->padding = ComputePaddingHeightWidth( |
| params->stride_height, params->stride_width, 1, 1, height, width, |
| filter_height, filter_width, params->padding, &unused_output_height, |
| &unused_output_width); |
| |
| // Note that quantized inference requires that all tensors have their |
| // parameters set. This is usually done during quantized training. |
| if (data_type != kTfLiteFloat32) { |
| const TfLiteTensor* input = GetInput(context, node, kInputTensor); |
| const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); |
| const TfLiteTensor* bias = |
| GetOptionalInputTensor(context, node, kBiasTensor); |
| TfLiteTensor* output = GetOutput(context, node, kOutputTensor); |
| int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; |
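    // PopulateConvolutionQuantizationParams takes an int* for the per-channel
    // shifts while OpData stores int32_t*, hence the reinterpret_cast below.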
| |
| return tflite::PopulateConvolutionQuantizationParams( |
| context, input, filter, bias, output, params->activation, |
| &data->output_multiplier, &data->output_shift, |
| &data->output_activation_min, &data->output_activation_max, |
| data->per_channel_output_multiplier, |
| reinterpret_cast<int*>(data->per_channel_output_shift), num_channels); |
| } |
| return kTfLiteOk; |
| } |
| |
| } // namespace |
| |
| void* Init(TfLiteContext* context, const char* buffer, size_t length) { |
| TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); |
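  // The framework stores the returned buffer in node->user_data; it lives in
  // the arena for the lifetime of the interpreter, so no Free is needed.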
| return context->AllocatePersistentBuffer(context, sizeof(OpData)); |
| } |
| |
| TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { |
| TFLITE_DCHECK(node->user_data != nullptr); |
| TFLITE_DCHECK(node->builtin_data != nullptr); |
| |
| auto* params = |
| reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data); |
| OpData* data = static_cast<OpData*>(node->user_data); |
| |
| const TfLiteTensor* input = GetInput(context, node, kInputTensor); |
| const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); |
| |
| const TfLiteType data_type = input->type; |
| int width = SizeOfDimension(input, 2); |
| int height = SizeOfDimension(input, 1); |
| int filter_width = SizeOfDimension(filter, 2); |
| int filter_height = SizeOfDimension(filter, 1); |
| |
  // Per-channel quantization is only needed for int8 inference. For other
  // quantized types, only a single scale and zero point are needed.
  const int num_channels =
      filter->dims->data[kDepthwiseConvQuantizedDimension];
  // Dynamically allocate per-channel quantization parameters.
| data->per_channel_output_multiplier = |
| reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer( |
| context, num_channels * sizeof(int32_t))); |
| data->per_channel_output_shift = |
| reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer( |
| context, num_channels * sizeof(int32_t))); |
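  // As a concrete example: a filter of shape [1, 3, 3, 8] gives
  // num_channels == 8, so eight int32_t multipliers and eight shifts are
  // reserved here and later filled in by CalculateOpData.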
| |
| // All per-channel quantized tensors need valid zero point and scale arrays. |
| if (input->type == kTfLiteInt8) { |
| TF_LITE_ENSURE_EQ(context, filter->quantization.type, |
| kTfLiteAffineQuantization); |
| |
| const auto* affine_quantization = |
| reinterpret_cast<TfLiteAffineQuantization*>( |
| filter->quantization.params); |
| TF_LITE_ENSURE(context, affine_quantization); |
| TF_LITE_ENSURE(context, affine_quantization->scale); |
| TF_LITE_ENSURE(context, affine_quantization->zero_point); |
| TF_LITE_ENSURE( |
| context, affine_quantization->scale->size == 1 || |
| affine_quantization->scale->size == |
| filter->dims->data[kDepthwiseConvQuantizedDimension]); |
| TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, |
| affine_quantization->zero_point->size); |
| } |
| |
| return CalculateOpData(context, node, params, width, height, filter_width, |
| filter_height, data_type, data); |
| } |
| |
| void EvalFloat(TfLiteContext* context, TfLiteNode* node, |
| TfLiteDepthwiseConvParams* params, const OpData* data, |
| const TfLiteTensor* input, const TfLiteTensor* filter, |
| const TfLiteTensor* bias, TfLiteTensor* output) { |
| float output_activation_min, output_activation_max; |
| CalculateActivationRange(params->activation, &output_activation_min, |
| &output_activation_max); |
| |
| tflite::DepthwiseParams op_params; |
| // Padding type is ignored, but still set. |
| op_params.padding_type = PaddingType::kSame; |
| op_params.padding_values.width = data->padding.width; |
| op_params.padding_values.height = data->padding.height; |
| op_params.stride_width = params->stride_width; |
| op_params.stride_height = params->stride_height; |
| op_params.dilation_width_factor = params->dilation_width_factor; |
| op_params.dilation_height_factor = params->dilation_height_factor; |
| op_params.depth_multiplier = params->depth_multiplier; |
| op_params.float_activation_min = output_activation_min; |
| op_params.float_activation_max = output_activation_max; |
| |
| tflite::reference_ops::DepthwiseConv( |
| op_params, GetTensorShape(input), GetTensorData<float>(input), |
| GetTensorShape(filter), GetTensorData<float>(filter), |
| GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), |
| GetTensorData<float>(output)); |
| } |
| |
| void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, |
| TfLiteDepthwiseConvParams* params, |
| const OpData* data, const TfLiteTensor* input, |
| const TfLiteTensor* filter, |
| const TfLiteTensor* bias, TfLiteTensor* output) { |
| DepthwiseParams op_params; |
| op_params.padding_type = PaddingType::kSame; |
| op_params.padding_values.width = data->padding.width; |
| op_params.padding_values.height = data->padding.height; |
| op_params.stride_width = params->stride_width; |
| op_params.stride_height = params->stride_height; |
| op_params.dilation_width_factor = params->dilation_width_factor; |
| op_params.dilation_height_factor = params->dilation_height_factor; |
| op_params.depth_multiplier = params->depth_multiplier; |
| op_params.input_offset = -input->params.zero_point; |
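  // Per the quantization spec, int8 per-channel weights are symmetric
  // (zero point 0), so no weights offset is applied.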
| op_params.weights_offset = 0; |
| op_params.output_offset = output->params.zero_point; |
| // TODO(b/130439627): Use calculated value for clamping. |
| op_params.quantized_activation_min = std::numeric_limits<int8_t>::min(); |
| op_params.quantized_activation_max = std::numeric_limits<int8_t>::max(); |
| |
| reference_integer_ops::DepthwiseConvPerChannel( |
| op_params, data->per_channel_output_multiplier, |
| data->per_channel_output_shift, GetTensorShape(input), |
      GetTensorData<int8_t>(input), GetTensorShape(filter),
      GetTensorData<int8_t>(filter), GetTensorShape(bias),
      GetTensorData<int32_t>(bias), GetTensorShape(output),
      GetTensorData<int8_t>(output));
| } |
| |
| void EvalQuantized(TfLiteContext* context, TfLiteNode* node, |
| TfLiteDepthwiseConvParams* params, const OpData* data, |
| const TfLiteTensor* input, const TfLiteTensor* filter, |
| const TfLiteTensor* bias, TfLiteTensor* output) { |
| const int32_t input_offset = -input->params.zero_point; |
| const int32_t filter_offset = -filter->params.zero_point; |
| const int32_t output_offset = output->params.zero_point; |
| |
| tflite::DepthwiseParams op_params; |
| // Padding type is ignored, but still set. |
| op_params.padding_type = PaddingType::kSame; |
| op_params.padding_values.width = data->padding.width; |
| op_params.padding_values.height = data->padding.height; |
| op_params.stride_width = params->stride_width; |
| op_params.stride_height = params->stride_height; |
| op_params.dilation_width_factor = params->dilation_width_factor; |
| op_params.dilation_height_factor = params->dilation_height_factor; |
| op_params.depth_multiplier = params->depth_multiplier; |
| op_params.quantized_activation_min = data->output_activation_min; |
| op_params.quantized_activation_max = data->output_activation_max; |
| op_params.input_offset = input_offset; |
| op_params.weights_offset = filter_offset; |
| op_params.output_offset = output_offset; |
| op_params.output_multiplier = data->output_multiplier; |
  // Legacy ops used mixed left and right shifts; the current convention is
  // that a positive shift value means a left shift, hence the negation here.
  op_params.output_shift = -data->output_shift;
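  // Inside the reference kernel each int32 accumulator is then requantized
  // roughly as
  //   acc = MultiplyByQuantizedMultiplier(acc, output_multiplier,
  //                                       output_shift);
  //   acc += output_offset;  // followed by clamping to the activation range
  // (MultiplyByQuantizedMultiplier is defined in kernels/internal/common.h).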
| |
| tflite::reference_ops::DepthwiseConv( |
| op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), |
| GetTensorShape(filter), GetTensorData<uint8_t>(filter), |
| GetTensorShape(bias), GetTensorData<int32_t>(bias), |
| GetTensorShape(output), GetTensorData<uint8_t>(output)); |
| } |
| |
| TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { |
| TFLITE_DCHECK(node->user_data != nullptr); |
| TFLITE_DCHECK(node->builtin_data != nullptr); |
| |
| auto* params = |
| reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data); |
| const OpData& data = *(static_cast<const OpData*>(node->user_data)); |
| |
| TfLiteTensor* output = GetOutput(context, node, kOutputTensor); |
| const TfLiteTensor* input = GetInput(context, node, kInputTensor); |
| const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); |
| const TfLiteTensor* bias = |
| (NumInputs(node) == 3) ? GetInput(context, node, kBiasTensor) : nullptr; |
| |
| // TODO(aselle): Consider whether float conv and quantized conv should be |
| // separate ops to avoid dispatch overhead here. |
| switch (input->type) { // Already know in/out types are same. |
| case kTfLiteFloat32: |
| EvalFloat(context, node, params, &data, input, filter, bias, output); |
| break; |
| case kTfLiteInt8: |
| EvalQuantizedPerChannel(context, node, params, &data, input, filter, bias, |
| output); |
| break; |
| case kTfLiteUInt8: |
| EvalQuantized(context, node, params, &data, input, filter, bias, output); |
| break; |
| default: |
| TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", |
| TfLiteTypeGetName(input->type), input->type); |
| return kTfLiteError; |
| } |
| return kTfLiteOk; |
| } |
| |
| } // namespace depthwise_conv |
| |
| TfLiteRegistration Register_DEPTHWISE_CONV_2D() { |
| return {/*init=*/depthwise_conv::Init, |
| /*free=*/nullptr, |
| /*prepare=*/depthwise_conv::Prepare, |
| /*invoke=*/depthwise_conv::Eval, |
| /*profiling_string=*/nullptr, |
| /*builtin_code=*/0, |
| /*custom_name=*/nullptr, |
| /*version=*/0}; |
| } |
| |
| } // namespace micro |
| } // namespace ops |
| } // namespace tflite |