//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
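
// The Visit*Operator functions below are invoked from the delegate's operator
// switch with the builtin code of the node being visited. A minimal sketch of
// the assumed call site (which lives outside this file) is:
//
//     case kTfLiteBuiltinDequantize:
//         return VisitDequantizeOperator(delegateData, tfLiteContext,
//                                        tfLiteNode, nodeIndex,
//                                        kTfLiteBuiltinDequantize);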
namespace armnnDelegate
{
TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     TfLiteNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t tfLiteDequantizeOperatorCode)
{
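    // DEQUANTIZE takes a single input tensor and produces a single output tensor.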
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteDequantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteDequantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
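
    // Check whether any of the configured backends can handle this Dequantize layer.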
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsDequantizeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo);
    };
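
    // No network to attach the layer to: the delegate is only checking support,
    // so report the result of the backend query.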
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }
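
    // Add the Dequantize layer to the network being built and propagate the
    // output tensor info to its output slot.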
    armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
    ARMNN_ASSERT(dequantizeLayer != nullptr);

    armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    return Connect(dequantizeLayer, tfLiteNode, delegateData);
}

TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   int nodeIndex,
                                   int32_t tfLiteQuantizeOperatorCode)
{
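    // QUANTIZE takes a single input tensor and produces a single output tensor.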
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Only affine per-layer quantization is supported.
    if (!IsAffineQuantization(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
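
    // Check whether any of the configured backends can handle this Quantize layer.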
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsQuantizeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo);
    };
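
    // No network to attach the layer to: the delegate is only checking support,
    // so report the result of the backend query.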
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }
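
    // Add the Quantize layer to the network being built and propagate the
    // output tensor info to its output slot.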
    armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
    ARMNN_ASSERT(quantizeLayer != nullptr);

    armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    return Connect(quantizeLayer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate