| // |
| // Copyright © 2017 Arm Ltd. All rights reserved. |
| // SPDX-License-Identifier: MIT |
| // |
| |
| #include "HalPolicy.hpp" |
| |
| #include "OutputShapeUtils.hpp" |
| |
| #include "../1.0/HalPolicy.hpp" |
| #include "../1.1/HalPolicy.hpp" |
| |
| #include <DataLayoutIndexed.hpp> |
| #include <Half.hpp> |
| |
| #include <cmath> |
| |
| namespace armnn_driver |
| { |
| namespace hal_1_2 |
| { |
| |
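// The V1_2::OperationType values shared with earlier HAL versions keep the same numeric values as their
// V1_0/V1_1 counterparts, so a static_cast between the enum types preserves the operation's identity.
// The helpers below rely on this to decide whether an operation can be forwarded to an earlier HalPolicy.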
| bool HandledByV1_0(V1_2::OperationType operationType) |
| { |
| switch (static_cast<V1_0::OperationType>(operationType)) |
| { |
| case V1_0::OperationType::ADD: |
| case V1_0::OperationType::AVERAGE_POOL_2D: |
| case V1_0::OperationType::CONCATENATION: |
| case V1_0::OperationType::DEPTH_TO_SPACE: |
| case V1_0::OperationType::DEQUANTIZE: |
| case V1_0::OperationType::EMBEDDING_LOOKUP: |
| case V1_0::OperationType::FLOOR: |
| case V1_0::OperationType::FULLY_CONNECTED: |
| case V1_0::OperationType::HASHTABLE_LOOKUP: |
| case V1_0::OperationType::L2_NORMALIZATION: |
| case V1_0::OperationType::L2_POOL_2D: |
| case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION: |
| case V1_0::OperationType::LOGISTIC: |
| case V1_0::OperationType::LSH_PROJECTION: |
| case V1_0::OperationType::LSTM: |
| case V1_0::OperationType::MAX_POOL_2D: |
| case V1_0::OperationType::MUL: |
| case V1_0::OperationType::RELU: |
| case V1_0::OperationType::RELU1: |
| case V1_0::OperationType::RELU6: |
| case V1_0::OperationType::RESHAPE: |
| case V1_0::OperationType::RNN: |
| case V1_0::OperationType::SOFTMAX: |
| case V1_0::OperationType::SPACE_TO_DEPTH: |
| case V1_0::OperationType::SVDF: |
| case V1_0::OperationType::TANH: |
| case V1_0::OperationType::OEM_OPERATION: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| bool HandledByV1_1(V1_2::OperationType operationType) |
| { |
| if (HandledByV1_0(operationType)) |
| { |
| return true; |
| } |
| switch (static_cast<V1_1::OperationType>(operationType)) |
| { |
| case V1_1::OperationType::BATCH_TO_SPACE_ND: |
| case V1_1::OperationType::DIV: |
| case V1_1::OperationType::MEAN: |
| case V1_1::OperationType::PAD: |
| case V1_1::OperationType::SPACE_TO_BATCH_ND: |
| case V1_1::OperationType::SQUEEZE: |
| case V1_1::OperationType::STRIDED_SLICE: |
| case V1_1::OperationType::SUB: |
| case V1_1::OperationType::TRANSPOSE: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| bool HandledByV1_0(const V1_2::Operation& operation) |
| { |
| return HandledByV1_0(operation.type); |
| } |
| |
| bool HandledByV1_1(const V1_2::Operation& operation) |
| { |
| return HandledByV1_1(operation.type); |
| } |
| |
| V1_0::OperationType CastToV1_0(V1_2::OperationType type) |
| { |
| return static_cast<V1_0::OperationType>(type); |
| } |
| |
| V1_1::OperationType CastToV1_1(V1_2::OperationType type) |
| { |
| return static_cast<V1_1::OperationType>(type); |
| } |
| |
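// V1_0::Operation, V1_1::Operation and V1_2::Operation share the same inputs/outputs representation
// (lists of operand indices into the model), so downgrading an operation only requires narrowing the type enum.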
| V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation) |
| { |
| V1_0::Operation op; |
| op.type = CastToV1_0(operation.type); |
| op.inputs = operation.inputs; |
| op.outputs = operation.outputs; |
| return op; |
| } |
| |
| V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation) |
| { |
| V1_1::Operation op; |
| op.type = CastToV1_1(operation.type); |
| op.inputs = operation.inputs; |
| op.outputs = operation.outputs; |
| return op; |
| } |
| |
| bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data) |
| { |
| if (HandledByV1_0(operation) && compliantWithV1_0(model)) |
| { |
| hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation); |
| hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model); |
| |
| return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data); |
| } |
| |
| if (HandledByV1_1(operation) && compliantWithV1_1(model)) |
| { |
| hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation); |
| hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model); |
| |
| return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data); |
| } |
| |
| switch (operation.type) |
| { |
| case V1_2::OperationType::CONV_2D: |
| return ConvertConv2d(operation, model, data); |
| case V1_2::OperationType::DEPTHWISE_CONV_2D: |
| return ConvertDepthwiseConv2d(operation, model, data); |
| case V1_2::OperationType::MAXIMUM: |
| return ConvertMaximum(operation, model, data); |
| case V1_2::OperationType::PAD_V2: |
| return ConvertPadV2(operation, model, data); |
| case V1_2::OperationType::PRELU: |
| return ConvertPrelu(operation, model, data); |
| case V1_2::OperationType::RESIZE_BILINEAR: |
| return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear); |
| case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR: |
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
| default: |
| return Fail("%s: Operation type %s not supported in ArmnnDriver", |
| __func__, toString(operation.type).c_str()); |
| } |
| } |
| |
| bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data) |
| { |
| LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); |
| if (!input.IsValid()) |
| { |
| return Fail("%s: Operation has invalid inputs", __func__); |
| } |
| |
| const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); |
| if (!output) |
| { |
| return Fail("%s: Could not read output 0", __func__); |
| } |
| |
| const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); |
| armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output); |
| |
| armnn::Convolution2dDescriptor desc; |
| desc.m_DataLayout = armnn::DataLayout::NHWC; |
| |
| // Determine whether padding is implicit or explicit |
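    // NNAPI CONV_2D has two signatures: the implicit-padding form takes 7 mandatory inputs
    // (input, weights, bias, padding scheme, stride_x, stride_y, activation) and the explicit-padding form
    // takes 10 (input, weights, bias, pad_left, pad_right, pad_top, pad_bottom, stride_x, stride_y,
    // activation). Either form may be followed by an optional BOOL data layout flag and dilation factors,
    // so with 8 or more inputs the two are disambiguated by whether input 7 is the BOOL layout flag
    // (implicit) or the INT32 stride_x (explicit).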
| bool implicitPadding = operation.inputs.size() == 7 || |
| (operation.inputs.size() >= 8 && |
| GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL); |
| |
| if (implicitPadding) |
| { |
| desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data); |
| } |
| else if (operation.inputs.size() >= 10) |
| { |
| desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data); |
| } |
| |
| const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1}; |
| |
    // ArmNN does not currently support non-fixed weights or bias.
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in], but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices, so the filter is permuted
    // to OIHW when the DataLayout is NCHW.
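    // A PermutationVector maps source dimension i to destination dimension mappings[i], so {0, 2, 3, 1}
    // leaves depth_out in place and moves depth_in in front of the spatial dimensions; e.g. an OHWI filter
    // of shape [32, 3, 3, 16] becomes an OIHW filter of shape [32, 16, 3, 3].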
| const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ? |
| ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) : |
| ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data); |
| const ConstTensorPin biasPin = |
| ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data); |
| |
| if (!weightsPin.IsValid()) |
| { |
| return Fail("%s: Operation has invalid weights", __func__); |
| } |
| |
| if (!biasPin.IsValid()) |
| { |
| return Fail("%s: Operation has invalid biases", __func__); |
| } |
| |
| armnn::ConstTensor weights = weightsPin.GetConstTensor(); |
| armnn::ConstTensor bias = biasPin.GetConstTensor(); |
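    // For quantized operations the bias scale is expected to be inputScale * weightsScale;
    // SanitizeBiasQuantizationScale resets the bias TensorInfo to that product if the model's value deviates.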
| SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo); |
| |
| ActivationFn activation; |
| |
| if (implicitPadding) |
| { |
| android::nn::PaddingScheme paddingScheme; |
| if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) || |
| !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) || |
| !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data)) |
| { |
| return Fail("%s: Operation has invalid inputs (implicit padding)", __func__); |
| } |
| |
| armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout); |
| unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex(); |
| unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex(); |
| const uint32_t kernelX = weights.GetShape()[widthIndex]; |
| const uint32_t kernelY = weights.GetShape()[heightIndex]; |
| const uint32_t inputX = inputInfo.GetShape()[widthIndex]; |
| const uint32_t inputY = inputInfo.GetShape()[heightIndex]; |
| |
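        // The Android padding schemes follow the TensorFlow convention: SAME pads so that
        // outputSize == ceil(inputSize / stride), placing any odd remainder at the right/bottom edge,
        // while VALID applies no padding at all.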
| CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme); |
| CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme); |
    }
| else if (operation.inputs.size() >= 10) |
| { |
| // explicit padding |
| if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) || |
| !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) || |
| !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data)) |
| { |
| return Fail("%s: Operation has invalid inputs (explicit padding)", __func__); |
| } |
| } |
| else |
| { |
| return Fail("%s: Unsupported number of operation inputs", __func__); |
| } |
| |
| desc.m_BiasEnabled = true; |
| armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo()); |
| |
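    // The model may leave the output shape unspecified (dynamic). The backend support check below needs a
    // fully specified TensorInfo, so infer the shape from the input, filter, stride, dilation and padding.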
| if (IsDynamicOutput(outputInfo)) |
| { |
| try |
| { |
| ALOGD("Output shape not set, will infer from inputs"); |
| outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(), |
| weights.GetInfo().GetShape(), |
| desc)); |
| } |
| catch (armnn::Exception& e) |
| { |
| return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what()); |
| } |
| } |
| |
| bool isSupported = false; |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| IsConvolution2dSupported, |
| data.m_Backends, |
| isSupported, |
| inputInfo, |
| outputInfo, |
| desc, |
| weights.GetInfo(), |
| biases); |
| |
| if (!isSupported) |
| { |
| return false; |
| } |
| |
| armnn::IConnectableLayer* startLayer = |
| data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias)); |
| |
| if (!startLayer) |
| { |
| return Fail("%s: AddConvolution2dLayer failed", __func__); |
| } |
| |
| armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data); |
| |
| if (!endLayer) |
| { |
| return Fail("%s: ProcessActivation failed", __func__); |
| } |
| |
| input.Connect(startLayer->GetInputSlot(0)); |
| |
| return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, |
| 0, |
| *endLayer, |
| model, |
| data, |
| armnn::Optional<armnn::TensorInfo>(outputInfo)); |
| } |
| |
| bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data) |
| { |
| LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); |
| |
| if (!input.IsValid()) |
| { |
| return Fail("%s: Operation has invalid inputs", __func__); |
| } |
| |
| const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); |
| |
| if (!output) |
| { |
| return Fail("%s: Could not read output 0", __func__); |
| } |
| |
| const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); |
| const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); |
| |
| // ArmNN does not currently support non-fixed weights or bias |
| // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ] |
| const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model); |
| |
| if (weightsOperand == nullptr) |
| { |
| return Fail("%s: Operand is invalid", __func__); |
| } |
| armnn::DepthwiseConvolution2dDescriptor desc; |
| desc.m_DataLayout = armnn::DataLayout::NHWC; |
| |
| // Determine whether padding is implicit or explicit |
| bool implicitPadding = operation.inputs.size() == 8 || |
| (operation.inputs.size() >= 9 && |
| GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL); |
| |
| // Look ahead to find the optional DataLayout, if present |
| const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11; |
| desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data); |
| |
| armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout); |
| unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex(); |
| unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex(); |
| unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex(); |
| |
| // Reinterpret weight data as [ H, W, I, M ] |
| armnn::TensorShape weightsShape({ weightsOperand->dimensions[1], |
| weightsOperand->dimensions[2], |
| inputInfo.GetShape()[channelsIndex], |
| weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] }); |
| |
| // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ] |
| const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U }; |
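    // e.g. with depth_in I = 8 and a depth multiplier M = 2, an NNAPI filter of shape [ 1, 3, 3, 16 ] is
    // reinterpreted as [ 3, 3, 8, 2 ] and then permuted to the [ 2, 8, 3, 3 ] (MIHW) layout ArmNN expects.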
| |
| const ConstTensorPin weightsPin = |
| ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, |
| 1, |
| model, |
| data, |
| HWIMToMIHW, |
| &weightsShape); |
| |
| // Bias is a 1D tensor |
| const ConstTensorPin biasPin = |
| ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data); |
| |
| if (!weightsPin.IsValid()) |
| { |
| return Fail("%s: Operation has invalid weights", __func__); |
| } |
| |
| if (!biasPin.IsValid()) |
| { |
| return Fail("%s: Operation has invalid biases", __func__); |
| } |
| |
| armnn::ConstTensor weights = weightsPin.GetConstTensor(); |
| armnn::ConstTensor bias = biasPin.GetConstTensor(); |
| SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo); |
| |
| ActivationFn activation; |
| |
| if (implicitPadding) |
| { |
| android::nn::PaddingScheme paddingScheme; |
| if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) || |
| !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) || |
| !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data)) |
| { |
| return Fail("%s: Operation has invalid inputs (implicit padding)", __func__); |
| } |
| |
| const uint32_t kernelX = weights.GetShape()[3]; |
| const uint32_t kernelY = weights.GetShape()[2]; |
| const uint32_t inputX = inputInfo.GetShape()[widthIndex]; |
| const uint32_t inputY = inputInfo.GetShape()[heightIndex]; |
| |
| CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme); |
| CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme); |
| } |
| else if (operation.inputs.size() >= 11) |
| { |
| // explicit padding |
| if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) || |
| !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) || |
| !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) || |
| !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data)) |
| { |
| return Fail("%s: Operation has invalid inputs (explicit padding)", __func__); |
| } |
| } |
| else |
| { |
| return Fail("%s: Unsupported number of operation inputs", __func__); |
| } |
| |
| desc.m_BiasEnabled = true; |
| armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo()); |
| |
| bool isSupported = false; |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| IsDepthwiseConvolutionSupported, |
| data.m_Backends, |
| isSupported, |
| inputInfo, |
| outputInfo, |
| desc, |
| weights.GetInfo(), |
| biases); |
| if (!isSupported) |
| { |
| return false; |
| } |
| |
| armnn::IConnectableLayer* startLayer = |
| data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias)); |
| if (!startLayer) |
| { |
| return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__); |
| } |
| |
| armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data); |
| if (!endLayer) |
| { |
| return Fail("%s: ProcessActivation failed", __func__); |
| } |
| |
| input.Connect(startLayer->GetInputSlot(0)); |
| |
| return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data); |
| } |
| |
| bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data) |
| { |
| LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); |
| LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data); |
| |
| if (!input0.IsValid() || !input1.IsValid()) |
| { |
| return Fail("%s: Operation has invalid inputs", __func__); |
| } |
| |
| const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); |
| if (!outputOperand) |
| { |
| return Fail("%s: Could not read output", __func__); |
| } |
| |
| armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand); |
| if (IsDynamicOutput(outInfo)) |
| { |
| ALOGD("Output shape not set, will infer from inputs"); |
| outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape())); |
| } |
| |
| bool isSupported = false; |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| IsMaximumSupported, |
| data.m_Backends, |
| isSupported, |
| input0.GetTensorInfo(), |
| input1.GetTensorInfo(), |
| outInfo); |
| |
| if (!isSupported) |
| { |
| return false; |
| } |
| |
| armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer(); |
| assert(layer != nullptr); |
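    // BroadcastTensor connects both inputs to the layer, inserting a Reshape in front of the lower-rank
    // input (if any) so that the two tensors agree in rank before the element-wise broadcast.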
| BroadcastTensor(input0, input1, layer, *data.m_Network); |
| |
| return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, |
| 0, |
| *layer, |
| model, |
| data, |
| armnn::Optional<armnn::TensorInfo>(outInfo)); |
| } |
| |
| bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data) |
| { |
| LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); |
| if (!input.IsValid()) |
| { |
| return Fail("%s: Could not read input 0", __func__); |
| } |
| |
| const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); |
| if (!output) |
| { |
| return Fail("%s: Could not read output", __func__); |
| } |
| |
| const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); |
| unsigned int rank = inputInfo.GetNumDimensions(); |
| |
| armnn::PadDescriptor descriptor; |
| if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor)) |
| { |
| return Fail("%s: Could not convert paddings", __func__); |
| } |
| |
| armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output); |
| if (IsDynamicOutput(outputInfo)) |
| { |
| ALOGD("Output shape not set, will infer from inputs"); |
| outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList)); |
| } |
| |
| // Determine type of padding value |
| OperandType operandType0; |
| OperandType operandType2; |
| |
| if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) || |
| !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2)) |
| { |
| return Fail("%s: Operation has invalid inputs", __func__); |
| } |
| |
| // Read value to use for padding |
| if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16) |
| { |
| armnn::Half f16PadValue; |
| if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data)) |
| { |
| return Fail("%s: Could not read input 2 (FLOAT16)", __func__); |
| } |
| |
| descriptor.m_PadValue = f16PadValue; |
| } |
| else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32) |
| { |
| if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data)) |
| { |
| return Fail("%s: Could not read input 2 (FLOAT32)", __func__); |
| } |
| } |
| else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32) |
| { |
| int32_t quantizedPadValue = 0; |
| if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data)) |
| { |
| return Fail("%s: Could not read input 2 (INT32)", __func__); |
| } |
| |
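        // The pad value arrives in the input's quantized domain; convert it back to a real value with
        // padValue = scale * (quantizedValue - zeroPoint) so the backend pads with the intended constant.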
| descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue, |
| inputInfo.GetQuantizationScale(), |
| inputInfo.GetQuantizationOffset()); |
| } |
| else |
| { |
| return Fail("%s: Operation has invalid inputs: type mismatch", __func__); |
| } |
| |
| bool isSupported = false; |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| IsPadSupported, |
| data.m_Backends, |
| isSupported, |
| inputInfo, |
| outputInfo, |
| descriptor); |
| if (!isSupported) |
| { |
| return false; |
| } |
| |
| armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor); |
| assert(layer != nullptr); |
| input.Connect(layer->GetInputSlot(0)); |
| layer->GetOutputSlot(0).SetTensorInfo(outputInfo); |
| |
| return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, |
| 0, |
| *layer, |
| model, |
| data, |
| armnn::Optional<armnn::TensorInfo>(outputInfo)); |
| } |
| |
| bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data) |
| { |
| LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); |
| LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data); |
| |
| if (!input.IsValid() || !alpha.IsValid()) |
| { |
| return Fail("%s: Operation has invalid inputs", __func__); |
| } |
| |
| const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); |
| |
| if (!output) |
| { |
| return Fail("%s: Could not read output", __func__); |
| } |
| |
| const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); |
| const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo(); |
| |
| armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output); |
| if (IsDynamicOutput(outputInfo)) |
| { |
| ALOGD("Output shape not set, will infer from inputs"); |
| outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape())); |
| } |
| |
| bool isSupported = false; |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| IsPreluSupported, |
| data.m_Backends, |
| isSupported, |
| inputInfo, |
| alphaInfo, |
| outputInfo); |
| if (!isSupported) |
| { |
| return false; |
| } |
| |
| armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer(); |
| |
| if (!layer) |
| { |
| return Fail("%s: AddPreluLayer failed", __func__); |
| } |
| |
| BroadcastTensor(input, alpha, layer, *data.m_Network); |
| |
| return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, |
| 0, |
| *layer, |
| model, |
| data, |
| armnn::Optional<armnn::TensorInfo>(outputInfo)); |
| } |
| |
| bool HalPolicy::ConvertResize(const Operation& operation, |
| const Model& model, |
| ConversionData& data, |
| armnn::ResizeMethod resizeMethod) |
| { |
| LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); |
| if (!input.IsValid()) |
| { |
| return Fail("%s: Could not read input 0", __func__); |
| } |
| |
| const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); |
| if (!output) |
| { |
| return Fail("%s: Could not read output 0", __func__); |
| } |
| |
| const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); |
| const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); |
| |
| armnn::ResizeDescriptor descriptor; |
| descriptor.m_Method = resizeMethod; |
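    // Input 3 is an optional BOOL data layout flag; OptionalDataLayout falls back to NHWC when it is absent.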
| descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data); |
| |
| OperandType operandType1; |
| OperandType operandType2; |
| |
| if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) || |
| !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2)) |
| { |
| return Fail("%s: Operation has invalid inputs", __func__); |
| } |
| |
| if (operandType1 != operandType2) |
| { |
| return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__); |
| } |
| |
| if (operandType1 == OperandType::INT32) |
| { |
| // Case 1: resizing by shape |
| int32_t targetWidth = 0; |
| int32_t targetHeight = 0; |
| |
| if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) || |
| !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data)) |
| { |
| return Fail("%s: Operation has invalid inputs for resizing by shape", __func__); |
| } |
| |
| if (targetWidth < 0 || targetHeight < 0) |
| { |
| return Fail("%s: Operation has invalid inputs for resizing by shape. " |
| "Target width/height cannot be < 0", __func__); |
| } |
| |
        descriptor.m_TargetWidth  = static_cast<uint32_t>(targetWidth);
        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
| } |
| else if (operandType1 == OperandType::FLOAT32) |
| { |
| // Case 2: resizing by scale |
| float widthScale = 1.0f; |
| float heightScale = 1.0f; |
| |
| if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) || |
| !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data)) |
| { |
| return Fail("%s: Operation has invalid inputs for resizing by scale", __func__); |
| } |
| |
| const armnn::TensorShape& inputShape = inputInfo.GetShape(); |
| armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout); |
| |
| float width = inputShape[dataLayoutIndexed.GetWidthIndex()]; |
| float height = inputShape[dataLayoutIndexed.GetHeightIndex()]; |
| |
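        // The target extent is floor(inputExtent * scale); the casts below make the float -> uint32_t
        // narrowing explicit.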
        descriptor.m_TargetWidth  = static_cast<uint32_t>(std::floor(width  * widthScale));
        descriptor.m_TargetHeight = static_cast<uint32_t>(std::floor(height * heightScale));
| } |
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return Fail("%s: Operation has invalid inputs. Only INT32 shapes and FLOAT32 scales are supported",
                    __func__);
    }
| |
| bool isSupported = false; |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| IsResizeSupported, |
| data.m_Backends, |
| isSupported, |
| inputInfo, |
| outputInfo, |
| descriptor); |
| if (!isSupported) |
| { |
| return false; |
| } |
| |
| armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor); |
| |
| assert(layer != nullptr); |
| |
| layer->GetOutputSlot(0).SetTensorInfo(outputInfo); |
| input.Connect(layer->GetInputSlot(0)); |
| |
| return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data); |
| } |
| |
| bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data) |
| { |
| LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); |
| |
    if (!input.IsValid())
| { |
| return Fail("%s: Operation has invalid inputs", __func__); |
| } |
| |
| const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); |
| unsigned int rank = inputInfo.GetNumDimensions(); |
| |
| if (rank != 4) |
| { |
| return Fail("%s: Only inputs with rank 4 are supported", __func__); |
| } |
| |
| armnn::SpaceToDepthDescriptor desc; |
| |
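    // SPACE_TO_DEPTH rearranges blockSize x blockSize spatial blocks into the channel dimension:
    // an NHWC input of shape [ N, H, W, C ] produces [ N, H / blockSize, W / blockSize, C * blockSize^2 ].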
    if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }
| |
| desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data); |
| |
| const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); |
| if (!output) |
| { |
| return Fail("%s: Could not read output 0", __func__); |
| } |
| |
| const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); |
| |
| bool isSupported = false; |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| IsSpaceToDepthSupported, |
| data.m_Backends, |
| isSupported, |
| inputInfo, |
| outputInfo, |
| desc); |
| if (!isSupported) |
| { |
| return false; |
| } |
| |
| armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc); |
| assert(layer != nullptr); |
| input.Connect(layer->GetInputSlot(0)); |
| |
| return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data); |
| } |
| |
| } // namespace hal_1_2 |
| } // namespace armnn_driver |