/* * Copyright (C) 2017 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
/** * @addtogroup NeuralNetworks * @{ */
/** * @file NeuralNetworks.h */
#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
/****************************************************************** * * IMPORTANT NOTICE: * * This file is part of Android's set of stable system headers * exposed by the Android NDK (Native Development Kit). * * Third-party source AND binary code relies on the definitions * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. * * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES */
#include <android/hardware_buffer.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/cdefs.h>
__BEGIN_DECLS
/** * Operand types. * * The type of an operand in a model. * * Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors * with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent * scalar values and must have no dimensions. * * Although we define many types, most operators accept just a few * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32}, * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, * and {@link ANEURALNETWORKS_INT32}. * * Available since API level 27. */ typedef enum { /** A 32 bit floating point scalar value. */ ANEURALNETWORKS_FLOAT32 = 0, /** A signed 32 bit integer scalar value. */ ANEURALNETWORKS_INT32 = 1, /** An unsigned 32 bit integer scalar value. */ ANEURALNETWORKS_UINT32 = 2, /** A tensor of 32 bit floating point values. */ ANEURALNETWORKS_TENSOR_FLOAT32 = 3, /** A tensor of 32 bit integer values. */ ANEURALNETWORKS_TENSOR_INT32 = 4, /** * A tensor of 8 bit unsigned integers that represent real numbers. * * Attached to this tensor are two numbers that can be used to convert the * 8 bit integer to the real value and vice versa. These two numbers are: * - scale: a 32 bit floating point value greater than zero. * - zeroPoint: a 32 bit integer, in range [0, 255]. * * The formula is: * real_value = (integer_value - zeroPoint) * scale. */ ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5, /** * An 8 bit boolean scalar value. * * Values of this operand type are either true or false. A zero value * represents false; any other value represents true. * * Available since API level 29. */ ANEURALNETWORKS_BOOL = 6, /** * A tensor of 16 bit signed integers that represent real numbers. * * Attached to this tensor is a number representing real value scale that is * used to convert the 16 bit number to a real value in the following way: * realValue = integerValue * scale. * * scale is a 32 bit floating point with value greater than zero. * * Available since API level 29. */ ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7, /** * A tensor of IEEE 754 16 bit floating point values. * * Available since API level 29. 
*/ ANEURALNETWORKS_TENSOR_FLOAT16 = 8, /** * A tensor of 8 bit boolean values. * * Values of this operand type are either true or false. A zero value * represents false; any other value represents true. * * Available since API level 29. */ ANEURALNETWORKS_TENSOR_BOOL8 = 9, /** * An IEEE 754 16 bit floating point scalar value. * * Available since API level 29. */ ANEURALNETWORKS_FLOAT16 = 10, /** * A tensor of 8 bit signed integers that represent real numbers. * * This tensor is associated with additional fields that can * be used to convert the 8 bit signed integer to the real value and vice versa. * These fields are: * - channelDim: a 32 bit unsigned integer indicating channel dimension. * - scales: an array of positive 32 bit floating point values. * The size of the scales array must be equal to dimensions[channelDim]. * * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used * to set the parameters for an Operand of this type. * * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0). * * The formula is: * realValue[..., C, ...] = * integerValue[..., C, ...] * scales[C] * where C is an index in the Channel dimension. * * Available since API level 29. */ ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11, /** * A tensor of 16 bit unsigned integers that represent real numbers. * * Attached to this tensor are two numbers that can be used to convert the * 16 bit integer to the real value and vice versa. These two numbers are: * - scale: a 32 bit floating point value greater than zero. * - zeroPoint: a 32 bit integer, in range [0, 65535]. * * The formula is: * real_value = (integer_value - zeroPoint) * scale. * * Available since API level 29. */ ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12, /** * A tensor of 8 bit signed integers that represent real numbers. * * Attached to this tensor is a number representing real value scale that is * used to convert the 8 bit number to a real value in the following way: * realValue = integerValue * scale. * * scale is a 32 bit floating point with value greater than zero. * * Available since API level 29. */ ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13, /** * A tensor of 8 bit signed integers that represent real numbers. * * Attached to this tensor are two numbers that can be used to convert the * 8 bit integer to the real value and vice versa. These two numbers are: * - scale: a 32 bit floating point value greater than zero. * - zeroPoint: a 32 bit integer, in range [-128, 127]. * * The formula is: * real_value = (integer_value - zeroPoint) * scale. * * Available since API level 30. */ ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14, /** * A reference to a model. * * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set * the value for an Operand of this type. * * Available since API level 30. */ ANEURALNETWORKS_MODEL = 15, } OperandCode; /** * Operation types. * * The type of an operation in a model. * * Available since API level 27. */ typedef enum { // Operations below are available since API level 27. /** * Adds two tensors, element-wise. * * Takes two input tensors of identical {@link OperandCode} and compatible * dimensions. The output is the sum of both input tensors, optionally * modified by an activation function. * * Two dimensions are compatible when: * 1. they are equal, or * 2. one of them is 1 * * The size of the output is the maximum size along each dimension of the * input operands. It starts with the trailing dimensions, and works its * way forward. 
* * Example: * * input1.dimension = {4, 1, 2} * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * * Since API level 29, generic zero-sized input tensor is supported. Zero * dimension is only compatible with 0 or 1. The size of the output * dimension is zero if either of corresponding input dimension is zero. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor. * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions * as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scales and zeroPoint can be different from input0 scale and zeroPoint. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, * the {@link FuseCode} must be "NONE". * * Outputs: * * 0: The sum, a tensor of the same {@link OperandCode} as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. * * Available since API level 27. */ ANEURALNETWORKS_ADD = 0, /** * Performs a 2-D average pooling operation. * * The output dimensions are functions of the filter dimensions, stride, and * padding. * * The values in the output tensor are computed as: * * output[b, i, j, channel] = * sum_{di, dj}( * input[b, strides[1] * i + di, strides[2] * j + dj, channel] * ) / sum(1) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. * Since API level 29, zero batches is supported for this tensor. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the right, in the ‘width’ dimension. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the top, in the ‘height’ dimension. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the bottom, in the ‘height’ dimension. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * width. 
* * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * height. * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. * Since API level 29, zero batches is supported for this tensor. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit * padding scheme, has to be one of the * {@link PaddingCode} values. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * width. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * height. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth]. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_AVERAGE_POOL_2D = 1, /** * Concatenates the input tensors along the given dimension. * * The input tensors must have identical {@link OperandCode} and the same * dimensions except the dimension along the concatenation axis. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * (full support since API level 29, see the input section) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0 ~ n-1: The list of n input tensors, of shape * [D0, D1, ..., Daxis(i), ..., Dm]. * Before API level 29, all input tensors of * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * must have the same scale and zeroPoint as the output tensor. * Input tensors of * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} * are allowed to have different scale and zeroPoint. * Since API level 29, zero-sized tensors are supported. * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the * concatenation axis. * * Outputs: * * 0: The output, a tensor of the same {@link OperandCode} as the input * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm]. * Since API level 29, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint values can be different from * input tensors. Before API level 29 they have to be the same as for the input tensors. * * Available since API level 27. */ ANEURALNETWORKS_CONCATENATION = 2, /** * Performs a 2-D convolution operation. 
* * The CONV_2D op sweeps a 2-D filter that can mix channels together over a * batch of images, applying the filter to each window of each image of the * appropriate size. * * The output dimensions are functions of the filter dimensions, stride, and * padding. * * The values in the output tensor are computed as: * * output[b, i, j, channel] = * sum_{di, dj, k} ( * input[b, strides[1] * i + di, strides[2] * j + dj, k] * * filter[channel, di, dj, k] * ) + bias[channel] * * Supported tensor {@link OperandCode} configurations: * * 32 bit floating point: * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. * * * Quantized: * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). * * Available since API level 29: * * 16 bit floating point: * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. * * * Quantized with symmetric per channel quantization for the filter: * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). * * Available since API level 30: * * Quantized signed (since API level 30): * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). * * * Quantized signed with filter symmetric per channel quantization (since API level 30): * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], * specifying the input. * Since API level 29, zero batches is supported for this tensor. * * 1: A 4-D tensor, of shape * [depth_out, filter_height, filter_width, depth_in], specifying the * filter. * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) * must be set to 0. * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint * of 0 and bias_scale == input_scale * filter_scale. * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 * and bias_scale of 0. 
The actual scale of each value 'i' is equal to * bias_scale[i] = input_scale * filter_scale[i]. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the right, in the ‘width’ dimension. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the top, in the ‘height’ dimension. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the bottom, in the ‘height’ dimension. * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on width dimension. If this input is set, * input 12 (dilation factor for height) must be specified as well. * Available since API level 29. * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on height dimension. If this input is set, * input 11 (dilation factor for width) must be specified as well. * Available since API level 29. * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], * specifying the input. * Since API level 29, zero batches is supported for this tensor. * * 1: A 4-D tensor, of shape * [depth_out, filter_height, filter_width, depth_in], specifying the * filter. * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) * must be set to 0. * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same * type. * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint * of 0 and bias_scale == input_scale * filter_scale. * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 * and bias_scale of 0. The actual scale of each value 'i' is equal to * bias_scale[i] = input_scale * filter_scale[i]. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit * padding scheme, has to be one of the * {@link PaddingCode} values. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. 
Specifies the activation to * invoke on the result. * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on width dimension. If this input is set, * input 9 (dilation factor for height) must be specified as well. * Available since API level 29. * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on height dimension. If this input is set, * input 8 (dilation factor for width) must be specified as well. * Available since API level 29. * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth_out]. * Before API level 29, for output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, * the following condition must be satisfied: output_scale > input_scale * filter_scale * * Available since API level 27. */ ANEURALNETWORKS_CONV_2D = 3, /** * Performs a depthwise 2-D convolution operation. * * Given an input tensor of shape [batches, height, width, depth_in] and a * filter tensor of shape [1, filter_height, filter_width, depth_out] * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV * applies a different filter to each input channel (expanding from 1 * channel to channel_multiplier channels for each), then concatenates the * results together. * * The output has depth_out = depth_in * depth_multiplier channels. * The output dimensions are functions of the filter dimensions, stride, and * padding. * * The values in the output tensor are computed as: * * output[b, i, j, k * channel_multiplier + q] = * sum_{di, dj} ( * input[b, strides[1] * i + di, strides[2] * j + dj, k] * * filter[1, di, dj, k * channel_multiplier + q] * ) + bias[k * channel_multiplier + q] * * Supported tensor {@link OperandCode} configurations: * * 32 bit floating point: * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. * * * Quantized: * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). * * Available since API level 29: * * 16 bit floating point: * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. * * * Quantized with symmetric per channel quantization for the filter: * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). * * Available since API level 30: * * Quantized signed (since API level 30): * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). * * * Quantized signed with filter symmetric per channel quantization (since API level 30): * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. 
* * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], * specifying the input. * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], * specifying the filter. * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) * must be set to 3. * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint * of 0 and bias_scale == input_scale * filter_scale. * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 * and bias_scale of 0. The actual scale of each value 'i' is equal to * bias_scale[i] = input_scale * filter_scale[i]. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the right, in the ‘width’ dimension. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the top, in the ‘height’ dimension. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the bottom, in the ‘height’ dimension. * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise * multiplier. * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on width dimension. If this input is set, * input 13 (dilation factor for height) must be specified as well. * Available since API level 29. * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on height dimension. If this input is set, * input 12 (dilation factor for width) must be specified as well. * Available since API level 29. 
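 *
 * As a sketch, assuming the usual floor-division convolution arithmetic
 * (the names below are descriptive, not operand names from this header),
 * the output spatial sizes with explicit padding and dilation follow:
 *
 *     effective_filter_height = (filter_height - 1) * dilation_height + 1
 *     out_height = (height + padding_top + padding_bottom -
 *                   effective_filter_height) / stride_height + 1
 *
 * and analogously for out_width.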
* * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], * specifying the input. * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], * specifying the filter. * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint * of 0 and bias_scale == input_scale * filter_scale. * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 * and bias_scale of 0. The actual scale of each value 'i' is equal to * bias_scale[i] = input_scale * filter_scale[i]. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit * padding scheme, has to be one of the * {@link PaddingCode} values. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise * multiplier. * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on width dimension. If this input is set, * input 10 (dilation factor for height) must be specified as well. * Available since API level 29. * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on height dimension. If this input is set, * input 9 (dilation factor for width) must be specified as well. * Available since API level 29. * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth_out]. Before API level 29, for * output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, * the following condition must be satisfied: * output_scale > input_scale * filter_scale * * Available since API level 27. */ ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4, /** * Rearranges data from depth into blocks of spatial data. * * More specifically, this op outputs a copy of the input tensor where * values from the depth dimension are moved in spatial blocks to the height * and width dimensions. The value block_size indicates the input block size * and how the data is moved. * * Chunks of data of size block_size * block_size from depth are rearranged * into non-overlapping blocks of size block_size x block_size. * * The width of the output tensor is input_depth * block_size, whereas the * height is input_height * block_size. 
The depth of the input tensor must * be divisible by block_size * block_size * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], * specifying the input. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. * block_size must be >=1 and block_size * block_size must be a divisor * of the input depth. * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Outputs: * * 0: The output 4-D tensor, of shape [batch, height*block_size, * width*block_size, depth/(block_size*block_size)]. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_DEPTH_TO_SPACE = 5, /** * Dequantizes the input tensor. * * The formula is: * * output = (input - zeroPoint) * scale. * * Supported input tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported output tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor. * Since API level 29, this tensor may be zero-sized. * * Outputs: * * 0: A tensor with the same shape as input0. * * Available since API level 27. */ ANEURALNETWORKS_DEQUANTIZE = 6, /** * Looks up sub-tensors in the input tensor. * * This operator takes for input a tensor of values (Values) and * a one-dimensional tensor of selection indices (Lookups). * The output tensor is the concatenation of sub-tensors of Values as * selected by Lookups. * * Think of Values as being sliced along its first dimension: * The entries in Lookups select which slices are concatenated together * to create the output tensor. * * For example, if Values has shape of [40, 200, 300] and * Lookups has shape of [3], all three values found in Lookups are * expected to be between 0 and 39. The resulting tensor must * have shape of [3, 200, 300]. * * If a value in Lookups is out of bounds, the operation must fail * and an error must be reported. * * Supported value tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 30) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported value tensor rank: from 2 * * Inputs: * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. 
* The values are indices into the first dimension of Values. * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are * extracted. * * Output: * * 0: A n-D tensor with the same rank and shape as the Values * tensor, except for the first dimension which has the same size * as Lookups' only dimension. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input1. * * Available since API level 27. */ ANEURALNETWORKS_EMBEDDING_LOOKUP = 7, /** * Computes element-wise floor() on the input tensor. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor. * * Outputs: * * 0: The output tensor, of the same {@link OperandCode} and dimensions as * the input tensor. * * Available since API level 27. */ ANEURALNETWORKS_FLOOR = 8, /** * Denotes a fully (densely) connected layer, which connects all elements * in the input tensor with each element in the output tensor. * * This layer implements the operation: * * outputs = activation(inputs * weights’ + bias) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor of at least rank 2, specifying the input. If rank is * greater than 2, then it gets flattened to a 2-D Tensor. The * (flattened) 2-D Tensor is reshaped (if necessary) to * [batch_size, input_size], where "input_size" corresponds to the * number of inputs to the layer, matching the second dimension of * weights, and "batch_size" is calculated by dividing the number of * elements by "input_size". * Since API level 29, zero batch_size is supported for this tensor. * * 1: A 2-D tensor, specifying the weights, of shape * [num_units, input_size], where "num_units" corresponds to the number * of output nodes. * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input * tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * Outputs: * * 0: The output tensor, of shape [batch_size, num_units]. Before API level 29, for * output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following * condition must be satisfied: output_scale > input_scale * filter_scale. * * Available since API level 27. */ ANEURALNETWORKS_FULLY_CONNECTED = 9, /** * Looks up sub-tensors in the input tensor using a key-value map. * * This operator takes for input a tensor of values (Values), * a one-dimensional tensor of selection values (Lookups) and * a one-dimensional tensor that maps these values to Values * indexes. The output tensor is the concatenation of sub-tensors of * Values as selected by Lookups via Keys. * * Think of Values as being sliced along its outer-most dimension. 
* The output is a concatenation of selected slices, with one slice * for each entry of Lookups. The slice selected is the one at the * same index as the Maps entry that matches the value in Lookups. * * For a hit, the corresponding sub-tensor of Values is included * in the Output tensor. For a miss, the corresponding sub-tensor in * Output must have zero values. * * For example, if Values has shape of [40, 200, 300], * Keys should have a shape of [40]. If Lookups tensor has shape * of [3], three slices are being concatenated, so the resulting tensor * must have the shape of [3, 200, 300]. If the first entry in Lookups * has the value 123456, that value must be located in Keys tensor. * If the sixth entry of Keys contains 123456, the sixth slice of Values * must be selected. If no entry in Keys has 123456, a slice of zeroes * must be concatenated. * * Supported value tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_INT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported value tensor rank: from 2 * * Inputs: * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with * shape [ k ]. * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape * [ n ]; Keys and Values pair represent a map, i.e., the ith element * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in * ascending order. * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension * must be n. * * Outputs: * * 0: Output. A tensor with shape [ k …]. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input2. * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup * hits (True) or not (False). * Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0 * and scale 1.0f. * A non-zero byte represents True, a hit. A zero indicates otherwise. * * Available since API level 27. */ ANEURALNETWORKS_HASHTABLE_LOOKUP = 10, /** * Applies L2 normalization along the axis dimension. * * The values in the output tensor are computed as: * * output[batch, row, col, channel] = * input[batch, row, col, channel] / * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) * * By default the axis dimension is the last dimension of the input tensor. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4 * Tensors with rank less than 4 are only supported since API level 29. * * Inputs: * * 0: An n-D tensor, specifying the tensor to be normalized. * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, * specifying the dimension normalization would be performed on. * Negative index is used to specify axis from the end (e.g. -1 for * the last axis). Must be in the range [-n, n). * Available since API level 29. * * Outputs: * * 0: A tensor of the same {@link OperandCode} and same shape as input0. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 128 and the zeroPoint must be 128. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the scale must be 1.f / 128 and the zeroPoint must be 0. 
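 *
 * As a worked example of that quantization, for a
 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output with scale 1.f / 128
 * and zeroPoint 128, a stored value q maps to the real value
 * (q - 128) / 128, so q = 0 represents -1.0, q = 128 represents 0.0, and
 * q = 255 represents 127.f / 128 (about 0.992).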
* * NOTE: Before API level 30, if the elements along an axis are all zeros, * the result is undefined. Since API level 30, if the elements along an axis * are all zeros, the result is logical zero. * * Available since API level 27. */ ANEURALNETWORKS_L2_NORMALIZATION = 11, /** * Performs an 2-D L2 pooling operation. * * The output dimensions are functions of the filter dimensions, stride, and * padding. * * The values in the output tensor are computed as: * * output[b, i, j, c] = * sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) / * sum(1)) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. * Since API level 29, zero batches is supported for this tensor. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the right, in the ‘width’ dimension. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the top, in the ‘height’ dimension. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the bottom, in the ‘height’ dimension. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * width. * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * height. * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. * Since API level 29, zero batches is supported for this tensor. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit * padding scheme, has to be one of the * {@link PaddingCode} values. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * width. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * height. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. 
* * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth]. * * Available since API level 27. */ ANEURALNETWORKS_L2_POOL_2D = 12, /** * Applies Local Response Normalization along the depth dimension. * * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the * last dimension), and each vector is normalized independently. Within a * given vector, each component is divided by the weighted, squared sum of * inputs within depth_radius. * * The output is calculated using this formula: * * sqr_sum[a, b, c, d] = sum( * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)) * output = input / pow((bias + alpha * sqr_sum), beta) * * For input tensor with rank less than 4, independently normalizes each * 1-D slice along specified dimension. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: up to 4 * Tensors with rank less than 4 are only supported since API level 29. * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of * the normalization window. * * 2: A scalar, specifying the bias, must not be zero. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias * value must be of {@link ANEURALNETWORKS_FLOAT16}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias * value must be of {@link ANEURALNETWORKS_FLOAT32}. * * 3: A scalar, specifying the scale factor, alpha. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the * alpha value must be of {@link ANEURALNETWORKS_FLOAT16}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the * alpha value must be of {@link ANEURALNETWORKS_FLOAT32}. * * 4: A scalar, specifying the exponent, beta. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta * value must be of {@link ANEURALNETWORKS_FLOAT16}. * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta * value must be of {@link ANEURALNETWORKS_FLOAT32}. * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, * specifying the dimension normalization would be performed on. * Negative index is used to specify axis from the end (e.g. -1 for * the last axis). Must be in the range [-n, n). * Available since API level 29. * * Outputs: * * 0: The output tensor of same shape as input0. * * Available since API level 27. */ ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13, /** * Computes sigmoid activation on the input tensor element-wise. * * The output is calculated using this formula: * * output = 1 / (1 + exp(-input)) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * Since API level 29, this tensor may be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 256 and the zeroPoint must be 0. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the scale must be 1.f / 256 and the zeroPoint must be -128. * * Available since API level 27. 
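 *
 * A minimal sketch of wiring this operation into a model with the NNAPI C
 * API (the dimensions, operand indices, and the already-created "model"
 * handle are illustrative assumptions):
 *
 *     ANeuralNetworksOperandType ttype = {
 *             .type = ANEURALNETWORKS_TENSOR_FLOAT32,
 *             .dimensionCount = 2,
 *             .dimensions = (uint32_t[]){1, 16},
 *             .scale = 0.0f,
 *             .zeroPoint = 0,
 *     };
 *     ANeuralNetworksModel_addOperand(model, &ttype);  // operand 0: input
 *     ANeuralNetworksModel_addOperand(model, &ttype);  // operand 1: output
 *     uint32_t in[] = {0};
 *     uint32_t out[] = {1};
 *     ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_LOGISTIC,
 *                                       1, in, 1, out);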
*/ ANEURALNETWORKS_LOGISTIC = 14, /** * Projects an input to a bit vector via locality senstive hashing. * * Supported input tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_INT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * Supported input tensor rank: from 1 * * Inputs: * * 0: Hash functions. Dim.size == 2, DataType: Float. * Tensor[0].Dim[0]: Number of hash functions. * Tensor[0].Dim[1]: Number of projected output bits generated by each * hash function. * If the projection type is Sparse: * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32 * * * 1: Input. Dim.size >= 1, no restriction on DataType. * * 2: Weight. Optional. Dim.size == 1, DataType: Float. * If not set, each input element is considered to have the same weight * of 1.0. * Tensor[1].Dim[0] == Tensor[2].Dim[0] * * 3: Type: * Sparse: * Value LSHProjectionType_SPARSE(=3) (since API level 29). * Computed bit vector is considered to be sparse. * Each output element is an int32 made up of multiple bits * computed from hash functions. * * NOTE: To avoid collisions across hash functions, an offset value * of k * (1 << Tensor[0].Dim[1]) will be added to each signature, * where k is the index of the hash function. * * Value LSHProjectionType_SPARSE_DEPRECATED(=1). * Legacy behavior that does not include the offset value. * * Dense: * Value LSHProjectionType_DENSE(=2). * Computed bit vector is considered to be dense. Each output * element represents a bit and can take the value of either * 0 or 1. * * Outputs: * * 0: If the projection type is Sparse: * Output.Dim == { Tensor[0].Dim[0] } * A tensor of int32 that represents hash signatures. * * If the projection type is Dense: * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } * A flattened tensor that represents projected bit vectors. * * Available since API level 27. * The offset value for sparse projections was added in API level 29. */ ANEURALNETWORKS_LSH_PROJECTION = 15, /** * Performs a single time step in a Long Short-Term Memory (LSTM) layer * * The LSTM operation is described by the following equations. * * \f{eqnarray*}{ * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\ * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\ * C_t =& clip(f_t \odot C_{t-1} + i_t \odot * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\ * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\ * & & \\ * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) * & if\ there\ is\ a\ projection; \\ * h_t =& & \\ * & o_t \odot g(C_t) & otherwise. 
\\ * \f} * Where: * * \f$x_t\f$ is the input, * * \f$i_t\f$ is the input gate, * * \f$f_t\f$ is the forget gate, * * \f$C_t\f$ is the cell state, * * \f$o_t\f$ is the output, * * \f$h_t\f$ is the output state, * * \f$\sigma\f$ is the logistic sigmoid function, * * \f$g\f$ is the cell input and cell output activation function, usually * \f$tahn\f$, * * \f$W_{xi}\f$ is the input-to-input weight matrix, * * \f$W_{hi}\f$ is the recurrent to input weight matrix, * * \f$W_{ci}\f$ is the cell-to-input weight matrix, * * \f$b_i\f$ is the input gate bias, * * \f$W_{xf}\f$ is the input-to-forget weight matrix, * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix, * * \f$W_{cf}\f$ is the cell-to-forget weight matrix, * * \f$b_f\f$ is the forget gate bias, * * \f$W_{xc}\f$ is the input-to-cell weight matrix, * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix, * * \f$b_c\f$ is the cell bias, * * \f$W_{xo}\f$ is the input-to-output weight matrix, * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix, * * \f$W_{co}\f$ is the cell-to-output weight matrix, * * \f$b_o\f$ is the output gate bias, * * \f$W_{proj}\f$ is the projection weight matrix, * * \f$b_{proj}\f$ is the projection bias, * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and * * \f$t_{proj}\f$ is the threshold for clipping the projected output. * * \f$\odot\f$ is the * * Hadamard product that takes two matrices and produces another * matrix, each element of which is the product of the corresponding * elements of the input matrices. * * Since API level 29 LSTM supports layer normalization. * In case layer normalization is used, the inputs to internal activation * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered * following an approach from section 3.1 from * https://arxiv.org/pdf/1607.06450.pdf * * The operation has the following independently optional inputs: * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all * have values or neither of them have values (i.e., all set to null). If * they have values, the peephole optimization is used. * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values, * or none of them have values. If they have no values, coupling of input * and forget gates (CIFG) is used, in which case the input gate * (\f$i_t\f$) is calculated using the following equation instead. * \f{eqnarray*}{ * i_t = 1 - f_t * \f} * In case peephole optimization is used and CIFG is not used * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the * cell-to-input weights must have no value. * * The projection weights (\f$W_{proj}\f$) is required only for the * recurrent projection layer, and should otherwise have no value. * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a * value if the recurrent projection layer exists, and should otherwise * have no value. * * (API level 29 or later) The four layer normalization weights either all have * values or none of them have values. Additionally, if CIFG is used, * input layer normalization weights tensor is omitted and the other layer * normalization weights either all have values or none of them have * values. Layer normalization is used when the values of all the layer * normalization weights are present. 
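 *
 * As a sketch, an optional input that is omitted (for example the
 * cell-to-input weights when the peephole path is not used) can be marked
 * as having no value by setting a null value of length 0 on its operand;
 * the index "ci" below is an illustrative assumption:
 *
 *     // Operand ci was added for the optional cell-to-input weights
 *     // (\f$W_{ci}\f$); a null buffer of length 0 marks it as omitted.
 *     ANeuralNetworksModel_setOperandValue(model, ci, NULL, 0);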
* * References: * * The default non-peephole non-CIFG implementation is based on: * http://www.bioinf.jku.at/publications/older/2604.pdf * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural * Computation, 9(8):1735-1780, 1997. * * The peephole implementation and projection layer is based on: * https://research.google.com/pubs/archive/43905.pdf * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory * recurrent neural network architectures for large scale acoustic * modeling." INTERSPEECH, 2014. * (However, the concept of peephole optimization was introduced in work * prior to this paper.) * * The coupling of input and forget gate (CIFG) is based on: * http://arxiv.org/pdf/1503.04069.pdf * Greff et al. "LSTM: A Search Space Odyssey" * * The layer normalization is based on: * https://arxiv.org/pdf/1607.06450.pdf * Jimmy Ba et al. "Layer Normalization" * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * All input and output tensors must be of the same type. * * Inputs: * * 0: The input (\f$x_t\f$). * A 2-D tensor of shape [batch_size, input_size], where “batch_size” * corresponds to the batching dimension, and “input_size” is the size * of the input. * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. * A 2-D tensor of shape [num_units, input_size], where “num_units” * corresponds to the number of cell units. * * 2: The input-to-forget weights (\f$W_{xf}\f$). * A 2-D tensor of shape [num_units, input_size]. * * 3: The input-to-cell weights (\f$W_{xc}\f$). * A 2-D tensor of shape [num_units, input_size]. * * 4: The input-to-output weights (\f$W_{xo}\f$). * A 2-D tensor of shape [num_units, input_size]. * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. * A 2-D tensor of shape [num_units, output_size], where “output_size” * corresponds to either the number of cell units (i.e., “num_units”), * or the second dimension of the “projection_weights”, if defined. * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). * A 2-D tensor of shape [num_units, output_size]. * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). * A 2-D tensor of shape [num_units, output_size]. * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). * A 2-D tensor of shape [num_units, output_size]. * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. * A 1-D tensor of shape [num_units]. * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. * A 1-D tensor of shape [num_units]. * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. * A 1-D tensor of shape [num_units]. * * 12:The input gate bias (\f$b_i\f$). Optional. * A 1-D tensor of shape [num_units]. * * 13:The forget gate bias (\f$b_f\f$). * A 1-D tensor of shape [num_units]. * * 14:The cell bias (\f$b_c\f$). * A 1-D tensor of shape [num_units]. * * 15:The output gate bias (\f$b_o\f$). * A 1-D tensor of shape [num_units]. * * 16:The projection weights (\f$W_{proj}\f$). Optional. * A 2-D tensor of shape [output_size, num_units]. * * 17:The projection bias (\f$b_{proj}\f$). Optional. * A 1-D tensor of shape [output_size]. * * 18:The output state (in) (\f$h_{t-1}\f$). * A 2-D tensor of shape [batch_size, output_size]. * * 19:The cell state (in) (\f$C_{t-1}\f$). * A 2-D tensor of shape [batch_size, num_units]. * * 20:The activation function (\f$g\f$). * A value indicating the activation function: *
 * <ul>
 * <li>0: None;
 * <li>1: Relu;
 * <li>3: Relu6;
 * <li>4: Tanh;
 * <li>6: Sigmoid.
 * </ul>
* * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 * then clipping is disabled. * Until API level 29 this scalar must be of type {@link * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, * otherwise if all the input tensors have the type {@link * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link * ANEURALNETWORKS_FLOAT16}. * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the * projection layer, such that values are bound within * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. * Until API level 29 this scalar must be of type {@link * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, * otherwise if all the input tensors have the type {@link * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link * ANEURALNETWORKS_FLOAT16}. * Since API level 29 there are additional inputs to this op: * * 23:The input layer normalization weights. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at input gate. * * 24:The forget layer normalization weights. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at forget gate. * * 25:The cell layer normalization weights. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at cell gate. * * 26:The output layer normalization weights. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at output gate. * * Outputs: * * 0: The scratch buffer. * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or * [batch_size, num_units * 4] without CIFG. * * 1: The output state (out) (\f$h_t\f$). * A 2-D tensor of shape [batch_size, output_size]. * * 2: The cell state (out) (\f$C_t\f$). * A 2-D tensor of shape [batch_size, num_units]. * * 3: The output (\f$o_t\f$). * A 2-D tensor of shape [batch_size, output_size]. This is effectively * the same as the current “output state (out)” value. * * Available since API level 27. */ ANEURALNETWORKS_LSTM = 16, /** * Performs an 2-D max pooling operation. * * The output dimensions are functions of the filter dimensions, stride, and * padding. * * The values in the output tensor are computed as: * * output[b, i, j, channel] = * max_{di, dj} ( * input[b, strides[1] * i + di, strides[2] * j + dj, channel] * ) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. * Since API level 29, zero batches is supported for this tensor. 
* * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the right, in the ‘width’ dimension. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the top, in the ‘height’ dimension. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on * the bottom, in the ‘height’ dimension. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * width. * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * height. * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. * Since API level 29, zero batches is supported for this tensor. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit * padding scheme, has to be one of the * {@link PaddingCode} values. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when * walking through input in the ‘height’ dimension. * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * width. * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter * height. * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth]. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_MAX_POOL_2D = 17, /** * Multiplies two tensors, element-wise. * * Takes two input tensors of identical {@link OperandCode} and compatible * dimensions. The output is the product of both input tensors, optionally * modified by an activation function. * * Two dimensions are compatible when: * 1. they are equal, or * 2. one of them is 1 * * The size of the resulting output is the maximum size along each dimension * of the input operands. It starts with the trailing dimensions, and works * its way forward. * * Since API level 29, generic zero-sized input tensor is supported. Zero * dimension is only compatible with 0 or 1. The size of the output * dimension is zero if either of corresponding input dimension is zero. 
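 *
 * A minimal sketch (illustrative only, not part of this API) of the
 * broadcast shape computation described above: dimensions are aligned
 * from the trailing end, a size of 1 (or, since API level 29, 0)
 * broadcasts against the other operand, and each output dimension is the
 * larger of the two sizes, or zero if either size is zero.
 *
 *     // Assumes <stdbool.h> and <stdint.h>. Dimensions are listed
 *     // most-significant first; "out" must hold max(rankA, rankB) entries.
 *     // Returns false if the two shapes are not compatible.
 *     static bool broadcastShape(const uint32_t* a, uint32_t rankA,
 *                                const uint32_t* b, uint32_t rankB,
 *                                uint32_t* out) {
 *         uint32_t rankOut = rankA > rankB ? rankA : rankB;
 *         for (uint32_t i = 0; i < rankOut; ++i) {
 *             uint32_t da = i < rankA ? a[rankA - 1 - i] : 1;  // missing dims act as 1
 *             uint32_t db = i < rankB ? b[rankB - 1 - i] : 1;
 *             if (da != db && da != 1 && db != 1) return false;
 *             out[rankOut - 1 - i] = (da == 0 || db == 0) ? 0 : (da > db ? da : db);
 *         }
 *         return true;
 *     }
 *
 * For example, shapes {4, 1, 2} and {5, 4, 3, 1} broadcast to {5, 4, 3, 2}.
 *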
* * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor. * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions * as input0. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, * the {@link FuseCode} must be "NONE". * * Outputs: * * 0: The product, a tensor of the same {@link OperandCode} as input0. * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the following condition must be satisfied: * output_scale > input1_scale * input2_scale. * * Available since API level 27. */ ANEURALNETWORKS_MUL = 18, /** * Computes rectified linear activation on the input tensor element-wise. * * The output is calculated using this formula: * * output = max(0, input) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * Since API level 29, this tensor may be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_RELU = 19, /** * Computes rectified linear 1 activation on the input tensor element-wise. * * The output is calculated using this formula: * * output = min(1.f, max(-1.f, input)) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * Since API level 29, this tensor may be zero-sized. * * Outputs: * * 0: The output tensor of the same shape as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_RELU1 = 20, /** * Computes rectified linear 6 activation on the input tensor element-wise. * * The output is calculated using this formula: * * output = min(6, max(0, input)) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * Since API level 29, this tensor may be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. 
* For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_RELU6 = 21, /** * Reshapes a tensor. * * Given an input tensor, this operation returns a tensor that has the same values as * the input tensor, but with a newly specified shape. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the tensor to be reshaped. * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the * shape of the output tensor. The number of elements implied by shape * must be the same as the number of elements in the input tensor. * * If one component of shape is the special value -1, the size of that * dimension is computed so that the total size remains constant. In * particular, a shape of [-1] flattens into 1-D. At most one component * of shape can be -1. * * Outputs: * * 0: The output tensor, of shape specified by the input shape. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_RESHAPE = 22, /** * Resizes images to the given size using bilinear interpolation. * * Resized images will be distorted if their output aspect ratio is not the * same as the input aspect ratio. The corner pixels of the output may not be the * same as the corner pixels of the input. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Both resizing by shape and resizing by scale are supported. * * Inputs (resizing by shape): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. * Since API level 29, zero batches is supported for this tensor. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the width * of the output tensor. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the height * of the output tensor. * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} * scalar, default to false. If True, the centers of the 4 corner * pixels of the input and output tensors are aligned, preserving the * values at the corner pixels. * Available since API level 30. * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} * scalar, default to false. If True, the pixel centers are assumed to * be at (0.5, 0.5). This is the default behavior of image.resize in * TF 2.0.
If this parameter is True, then align_corners parameter * must be False. * Available since API level 30. * * Inputs (resizing by scale, since API level 29): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. Zero batches is supported for this tensor. * * 1: A scalar, specifying width_scale, the scaling factor of the width * dimension from the input tensor to the output tensor. The output * width is calculated as new_width = floor(width * width_scale). * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of * {@link ANEURALNETWORKS_FLOAT32} otherwise. * * 2: A scalar, specifying height_scale, the scaling factor of the height * dimension from the input tensor to the output tensor. The output * height is calculated as new_height = floor(height * height_scale). * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of * {@link ANEURALNETWORKS_FLOAT32} otherwise. * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} * scalar, default to false. If True, the centers of the 4 corner * pixels of the input and output tensors are aligned, preserving the * values at the corner pixels. * Available since API level 30. * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} * scalar, default to false. If True, the pixel centers are assumed to * be at (0.5, 0.5). This is the default behavior of image.resize in * TF 2.0. If this parameter is True, then align_corners parameter * must be False. * Available since API level 30. * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, new_height, new_width, depth]. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_RESIZE_BILINEAR = 23, /** * A basic recurrent neural network layer. * * This layer implements the operation: * outputs = state = activation(inputs * input_weights + * state * recurrent_weights + bias) * * Where: * * “input_weights” is a weight matrix that multiplies the inputs; * * “recurrent_weights” is a weight matrix that multiplies the current * “state” which itself is the output from the previous time step * computation; * * “bias” is a bias vector (added to each output vector in the batch); * * “activation” is the function passed as the “fused_activation_function” * argument (if not “NONE”). * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * The input tensors must all be the same type. * * Inputs: * * 0: input. * A 2-D tensor of shape [batch_size, input_size], where “batch_size” * corresponds to the batching dimension, and “input_size” is the size * of the input. * * 1: weights. * A 2-D tensor of shape [num_units, input_size], where “num_units” * corresponds to the number of units. * * 2: recurrent_weights. * A 2-D tensor of shape [num_units, num_units], with columns * corresponding to the weights from each unit. * * 3: bias. * A 1-D tensor of shape [num_units]. * * 4: hidden state (in). * A 2-D tensor of shape [batch_size, num_units]. 
* * 5: fused_activation_function. * An optional {@link FuseCode} value indicating the * activation function. If “NONE” is specified then it results in a * linear activation. * * Outputs: * * 0: hidden state (out). * A 2-D tensor of shape [batch_size, num_units]. * * * 1: output. * A 2-D tensor of shape [batch_size, num_units]. This is effectively * the same as the current state value. * * Available since API level 27. */ ANEURALNETWORKS_RNN = 24, /** * Computes the softmax activation on the input tensor element-wise, per * batch, by normalizing the input vector so the maximum coefficient is * zero. * * The output is calculated using this formula: * * output[batch, i] = * exp((input[batch, i] - max(input[batch, :])) * beta) / * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} * * For input tensor with rank other than 2, the activation will be applied * independently on each 1-D slice along specified dimension. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4. * Tensors with rank other than 2 or 4 are only supported since API level 29. * * Inputs: * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. * Since API level 29, this tensor may be zero-sized. * * 1: A scalar, specifying the positive scaling factor for the exponent, * beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar * must be of {@link ANEURALNETWORKS_FLOAT32}. * If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the * scalar must be of {@link ANEURALNETWORKS_FLOAT16}. * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, * specifying the dimension the activation would be performed on. * Negative index is used to specify axis from the end (e.g. -1 for * the last axis). Must be in the range [-n, n). * Available since API level 29. * * Outputs: * * 0: The output tensor of same shape as input0. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 256 and the zeroPoint must be 0. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the scale must be 1.f / 256 and the zeroPoint must be -128. * * Available since API level 27. */ ANEURALNETWORKS_SOFTMAX = 25, /** * Rearranges blocks of spatial data, into depth. * * More specifically, this op outputs a copy of the input tensor where * values from the height and width dimensions are moved to the depth * dimension. The value block_size indicates the input block size and how * the data is moved. * * Chunks of data of size block_size * block_size from depth are rearranged * into non-overlapping blocks of size block_size x block_size. * * The depth of the output tensor is input_depth * block_size * block_size. * The input tensor's height and width must be divisible by block_size. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. 
Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], * specifying the input. * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. * block_size must be >=1 and block_size must be a divisor of both the * input height and width. * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Outputs: * * 0: The output 4-D tensor, of shape [batches, height/block_size, * width/block_size, depth_in*block_size*block_size]. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 27. */ ANEURALNETWORKS_SPACE_TO_DEPTH = 26, /** * SVDF op is a kind of stateful layer derived from the notion that a * densely connected layer that's processing a sequence of input frames can * be approximated by using a singular value decomposition of each of its * nodes. The implementation is based on: * * https://research.google.com/pubs/archive/43813.pdf * * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada. * “Compressing Deep Neural Networks using a Rank-Constrained Topology”. * INTERSPEECH, 2015. * * It processes the incoming input using a 2-stage filtering mechanism: * * stage 1 performs filtering on the "features" dimension, whose outputs * get pushed into a memory of fixed-size memory_size. * * stage 2 performs filtering on the "time" dimension of the memory_size * memoized outputs of stage 1. * * Specifically, for rank 1, this layer implements the operation: * * memory = push(conv1d(inputs, weights_feature, feature_dim, * "ANEURALNETWORKS_PADDING_VALID")); * outputs = activation(memory * weights_time + bias); * * Where: * * “weights_feature” is a weights matrix that processes the inputs (by * convolving the input with every “feature filter”), and whose outputs * get pushed, stacked in order, into the fixed-size “memory” (the oldest * entry gets dropped); * * “weights_time” is a weights matrix that processes the “memory” (by a * batched matrix multiplication on the num_units); * * “bias” is an optional bias vector (added to each output vector in the * batch); and * * “activation” is the function passed as the “fused_activation_function” * argument (if not “NONE”). * * Each rank adds a dimension to the weights matrices by means of stacking * the filters. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * All input tensors must be the same type. * * Inputs: * * 0: input. * A 2-D tensor of shape [batch_size, input_size], where “batch_size” * corresponds to the batching dimension, and “input_size” is the size * of the input. * * 1: weights_feature. * A 2-D tensor of shape [num_units, input_size], where “num_units” * corresponds to the number of units. * * 2: weights_time. * A 2-D tensor of shape [num_units, memory_size], where “memory_size” * corresponds to the fixed-size of the memory. * * 3: bias. * An optional 1-D tensor of shape [num_units]. * * 4: state (in). * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank]. * * 5: rank. * The rank of the SVD approximation. * * 6: fused_activation_function. 
* An optional {@link FuseCode} value indicating the * activation function. If “NONE” is specified then it results in a * linear activation. * * Outputs: * * 0: state (out). * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape * [batch_size, (memory_size - 1) * num_units * rank]. * * 1: output. * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape * [batch_size, num_units]. * * Available since API level 27. */ ANEURALNETWORKS_SVDF = 27, /** * Computes hyperbolic tangent of input tensor element-wise. * * The output is calculated using this formula: * * output = tanh(input) * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. * Since API level 29, this tensor may be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 128 and the zeroPoint must be 128. * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, * the scale must be 1.f / 128 and the zeroPoint must be 0. * * Available since API level 27. */ ANEURALNETWORKS_TANH = 28, // Operations below are available since API level 28. /** * BatchToSpace for N-dimensional tensors. * * This operation reshapes the batch dimension (dimension 0) into M + 1 * dimensions of shape block_shape + [batch], interleaves these blocks back * into the grid defined by the spatial dimensions [1, ..., M], to obtain a * result with the same rank as the input. * * This is the reverse of SpaceToBatch. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Inputs: * * 0: An n-D tensor, specifying the tensor to be reshaped * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block * sizes for each spatial dimension of the input tensor. All values * must be >= 1. * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 28. */ ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29, /** * Element-wise division of two tensors. * * Takes two input tensors of identical {@link OperandCode} and compatible * dimensions. The output is the result of dividing the first input tensor * by the second, optionally modified by an activation function. * * For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs * "floor division" ("//" in Python). 
For example, * 5 // 2 = 2 * -5 // 2 = -3 * * Two dimensions are compatible when: * 1. they are equal, or * 2. one of them is 1 * * The size of the output is the maximum size along each dimension of the * input operands. It starts with the trailing dimensions, and works its way * forward. * * Example: * input1.dimension = {4, 1, 2} * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * * Since API level 29, generic zero-sized input tensor is supported. Zero * dimension is only compatible with 0 or 1. The size of the output * dimension is zero if either of corresponding input dimension is zero. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0: An n-D tensor, specifying the first input. * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions * as input0. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, * the {@link FuseCode} must be "NONE". * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0. * * Available since API level 28. */ ANEURALNETWORKS_DIV = 30, /** * Computes the mean of elements across dimensions of a tensor. * * Reduces the input tensor along the given dimensions to reduce. Unless * keep_dims is true, the rank of the tensor is reduced by 1 for each entry * in axis. If keep_dims is true, the reduced dimensions are retained with * length 1. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0: A tensor, specifying the input. * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions * to reduce. Must be in the range * [-rank(input_tensor), rank(input_tensor)). * * NOTE: When the operation was introduced, the documentation * incorrectly stated that if dimensions were empty, the operation * would reduce across all dimensions. This behavior was never * implemented. * * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive, * retains reduced dimensions with length 1. * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * If all dimensions are reduced and keep_dims is false, the output * shape is [1]. * * Available since API level 28. */ ANEURALNETWORKS_MEAN = 31, /** * Pads a tensor. * * This operation pads a tensor according to the specified paddings. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * (full support since API level 29, see the output section) * * Supported tensor rank: up to 4 * * Inputs: * * 0: An n-D tensor, specifying the tensor to be padded. 
* * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings * for each spatial dimension of the input tensor. The shape of the * tensor must be {rank(input0), 2}. * padding[i, 0] specifies the number of elements to be padded in the * front of dimension i. * padding[i, 1] specifies the number of elements to be padded after the * end of dimension i. * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0. The * output tensor has the same rank as input0, and each * dimension of the output tensor has the same size as the * corresponding dimension of the input tensor plus the size * of the padding: * output0.dimension[i] = * padding[i, 0] + input0.dimension[i] + padding[i, 1] * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * NOTE: Before API level 29, the pad value for * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. * Since API level 29, the pad value is always the logical zero. * * Available since API level 28. */ ANEURALNETWORKS_PAD = 32, /** * SpaceToBatch for N-Dimensional tensors. * * This operation divides "spatial" dimensions [1, ..., M] of the input into * a grid of blocks of shape block_shape, and interleaves these blocks with * the "batch" dimension (0) such that in the output, the spatial dimensions * [1, ..., M] correspond to the position within the grid, and the batch * dimension combines both the position within a spatial block and the * original batch position. Prior to division into blocks, the spatial * dimensions of the input are optionally zero padded according to paddings. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * (full support since API level 29, see the output section) * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * NCHW is supported since API level 29. * * Inputs: * * 0: An n-D tensor, specifying the input. * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block * sizes for each spatial dimension of the input tensor. All values * must be >= 1. * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings * for each spatial dimension of the input tensor. All values must be * >= 0. The shape of the tensor must be {M, 2}, where M is the number * of spatial dimensions. * padding[i, 0] specifies the number of element to be padded in the * front of dimension i. * padding[i, 1] specifies the number of element to be padded after the * end of dimension i. * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * NOTE: Before API level 29, the pad value for * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. 
* Since API level 29, the pad value is always the logical zero. * * Available since API level 28. */ ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33, /** * Removes dimensions of size 1 from the shape of a tensor. * * Given a tensor input, this operation returns a tensor of the same * {@link OperandCode} with all dimensions of size 1 removed. If you don't * want to remove all size 1 dimensions, you can remove specific size 1 * dimensions by specifying the axes (input1). * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0: An n-D tensor, the tensor to be squeezed. * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The * dimensions to squeeze. If specified only squeezes the dimensions * listed. Otherwise, squeezes all dimensions. The dimension index * starts at 0. An error must be reported if squeezing a dimension that * is not 1. * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0. Contains the * same data as input, but has one or more dimensions of size 1 * removed. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * If all input dimensions are equal to 1 and are to be squeezed, the * output shape is [1]. * * Available since API level 28. */ ANEURALNETWORKS_SQUEEZE = 34, /** * Extracts a strided slice of a tensor. * * Roughly speaking, this op extracts a slice of size (end - begin) / stride * from the given input tensor. Starting at the location specified by begin * the slice continues by adding stride to the index until all dimensions * are not less than end. Note that a stride can be negative, which causes a * reverse slice. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0: An n-D tensor, specifying the tensor to be sliced. * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The * starts of the dimensions of the input tensor to be sliced. The * length must be of rank(input0). * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The * ends of the dimensions of the input tensor to be sliced. The length * must be of rank(input0). * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The * strides of the dimensions of the input tensor to be sliced. The * length must be of rank(input0). The entries must be non-zero. * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit * of begin_mask is set, begin[i] is ignored and the fullest possible * range in that dimension is used instead. * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of * end_mask is set, end[i] is ignored and the fullest possible range in * that dimension is used instead. * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the * ith bit of shrink_axis_mask is set, the ith dimension specification * shrinks the dimensionality by 1, taking on the value at index * begin[i]. 
In this case, the ith specification must define a * slice of size 1, e.g. begin[i] = x, end[i] = x + 1. * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k), * where k is the number of bits set in shrink_axis_mask. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * If shrink_axis_mask is true for all input dimensions, the output * shape is [1]. * * Available since API level 28. */ ANEURALNETWORKS_STRIDED_SLICE = 35, /** * Element-wise subtraction of two tensors. * * Takes two input tensors of identical {@link OperandCode} and compatible * dimensions. The output is the result of subtracting the second input * tensor from the first one, optionally modified by an activation function. * * Two dimensions are compatible when: * 1. they are equal, or * 2. one of them is 1 * * The size of the output is the maximum size along each dimension of the * input operands. It starts with the trailing dimensions, and works its way * forward. * * Example: * input1.dimension = {4, 1, 2} * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * * Since API level 29, generic zero-sized input tensor is supported. Zero * dimension is only compatible with 0 or 1. The size of the output * dimension is zero if either of corresponding input dimension is zero. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0: An n-D tensor, specifying the first input. * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions * as input0. * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the * {@link FuseCode} values. Specifies the activation to * invoke on the result. * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, * the {@link FuseCode} must be "NONE". * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0. * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. * * Available since API level 28. */ ANEURALNETWORKS_SUB = 36, /** * Transposes the input tensor, permuting the dimensions according to the * perm tensor. * * The returned tensor's dimension i corresponds to the input dimension * perm[i]. If perm is not given, it is set to (n-1...0), where n is the * rank of the input tensor. Hence by default, this operation performs a * regular matrix transpose on 2-D input Tensors. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: up to 4 * * Inputs: * * 0: An n-D tensor, specifying the tensor to be transposed. * Since API level 29, this tensor may be zero-sized. * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, * the permutation of the dimensions of the input tensor. * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0. 
* For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. * * Available since API level 28. */ ANEURALNETWORKS_TRANSPOSE = 37, // Operations below are available since API level 29. /** * Computes the absolute value of a tensor, element-wise. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) * * Supported tensor rank: from 1. * * Inputs: * * 0: A tensor. * * Outputs: * * 0: The output tensor of same shape as input0. * * Available since API level 29. */ ANEURALNETWORKS_ABS = 38, /** * Returns the index of the largest element along an axis. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_INT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: from 1 * * Inputs: * * 0: An n-D tensor specifying the input. Must be non-empty. * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to * reduce across. Negative index is used to specify axis from the * end (e.g. -1 for the last axis). Must be in the range [-n, n). * * Outputs: * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. * If input is 1-dimensional, the output shape is [1]. * * Available since API level 29. */ // There is no underscore in ARG_MAX to avoid name conflict with // the macro defined in libc/kernel/uapi/linux/limits.h. ANEURALNETWORKS_ARGMAX = 39, /** * Returns the index of the smallest element along an axis. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_INT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) * * Supported tensor rank: from 1 * * Inputs: * * 0: An n-D tensor specifying the input. Must be non-empty. * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to * reduce across. Negative index is used to specify axis from the * end (e.g. -1 for the last axis). Must be in the range [-n, n). * * Outputs: * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. * If input is 1-dimensional, the output shape is [1]. * * Available since API level 29. */ ANEURALNETWORKS_ARGMIN = 40, // See ARGMAX for naming discussion. /** * Transform axis-aligned bounding box proposals using bounding box deltas. * * Given the positions of bounding box proposals and the corresponding * bounding box deltas for each class, return the refined bounding box * regions. The resulting bounding boxes are cliped against the edges of * the image. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM} * * Inputs: * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the * bounding box proposals, each line with format [x1, y1, x2, y2]. * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois * is supported for this tensor. 
* * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the * bounding box delta for each region of interest and each class. The * bounding box deltas are organized in the following order * [dx, dy, dw, dh], where dx and dy is the relative correction factor * for the center position of the bounding box with respect to the width * and height, dw and dh is the log-scale relative correction factor * for the width and height. For input0 of type * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is * supported for this tensor. * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape * [num_rois], specifying the batch index of each box. Boxes with * the same batch index are grouped together. Zero num_rois is * supported for this tensor. * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of * each image in the batch, each line with format * [image_height, image_width]. * * Outputs: * * 0: A tensor of the same {@link OperandCode} as input0, with shape * [num_rois, num_classes * 4], specifying the coordinates of each * output bounding box for each class, with format [x1, y1, x2, y2]. * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the * scale must be 0.125 and the zero point must be 0. * * Available since API level 29. */ ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41, /** * A recurrent neural network layer that applies an LSTM cell to a * sequence of inputs in forward and backward directions. * * The op supports cross-linking via an auxiliary input. Regular cell feeds * one input into the two RNN cells in the following way: * * INPUT (INPUT_REVERSED) * | | * --------------------- * | FW_LSTM BW_LSTM | * --------------------- * | | * FW_OUT BW_OUT * * An op with cross-linking takes two inputs and feeds them into the RNN * cells in the following way: * * AUX_INPUT (AUX_INPUT_REVERSED) * | | * INPUT | (INPUT_R'D.)| * | | | | * ----------------------- * | \ / \ / | * | FW_LSTM BW_LSTM | * ----------------------- * | | * FW_OUT BW_OUT * * The cross-linking mode is enabled iff auxiliary input and auxiliary * weights are present. While stacking this op on top of itself, this * allows to connect both forward and backward outputs from previous cell * to the next cell's input. * * Since API level 30 parallel linking mode is supported. The mode is * enabled if auxiliary input is present but auxiliary weights are omitted. * In this case, the cell feeds inputs into the RNN in the following way: * * INPUT (AUX_INPUT_REVERSED) * | | * --------------------- * | FW_LSTM BW_LSTM | * --------------------- * | | * FW_OUT BW_OUT * * While stacking this op on top of itself, this allows to connect both * forward and backward outputs from previous cell to the next cell's * corresponding inputs. * * Supported tensor {@link OperandCode}: * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} * * Supported tensor rank: 3, either time-major or batch-major. * * All input and output tensors must be of the same type. * * Inputs: * * 0: The input. * A 3-D tensor of shape: * If time-major: [max_time, batch_size, input_size] * If batch-major: [batch_size, max_time, input_size] * where "max_time" is the number of timesteps (sequence length), * "batch_size" corresponds to the batching dimension, and * "input_size" is the size of the input. * * 1: The forward input-to-input weights. Optional. 
* A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units” * corresponds to the number of forward cell units. * * 2: The forward input-to-forget weights. * A 2-D tensor of shape [fw_num_units, input_size]. * * 3: The forward input-to-cell weights. * A 2-D tensor of shape [fw_num_units, input_size]. * * 4: The forward input-to-output weights. * A 2-D tensor of shape [fw_num_units, input_size]. * * 5: The forward recurrent-to-input weights. Optional. * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size” * corresponds to either the number of cell units (i.e., fw_num_units), * or the second dimension of the “fw_projection_weights”, if defined. * * 6: The forward recurrent-to-forget weights. * A 2-D tensor of shape [fw_num_units, fw_output_size]. * * 7: The forward recurrent-to-cell weights. * A 2-D tensor of shape [fw_num_units, fw_output_size]. * * 8: The forward recurrent-to-output weights. * A 2-D tensor of shape [fw_num_units, fw_output_size]. * * 9: The forward cell-to-input weights. Optional. * A 1-D tensor of shape [fw_num_units]. * * 10: The forward cell-to-forget weights. Optional. * A 1-D tensor of shape [fw_num_units]. * * 11: The forward cell-to-output weights. Optional. * A 1-D tensor of shape [fw_num_units]. * * 12: The forward input gate bias. Optional. * A 1-D tensor of shape [fw_num_units]. * * 13: The forward forget gate bias. * A 1-D tensor of shape [fw_num_units]. * * 14: The forward cell gate bias. * A 1-D tensor of shape [fw_num_units]. * * 15: The forward output gate bias. * A 1-D tensor of shape [fw_num_units]. * * 16: The forward projection weights. Optional. * A 2-D tensor of shape [fw_output_size, fw_num_units]. * * 17: The forward projection bias. Optional. * A 1-D tensor of shape [fw_output_size]. * * 18: The backward input-to-input weights. Optional. * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units” * corresponds to the number of backward cell units. * * 19: The backward input-to-forget weights. * A 2-D tensor of shape [bw_num_units, input_size]. * * 20: The backward input-to-cell weights. * A 2-D tensor of shape [bw_num_units, input_size]. * * 21: The backward input-to-output weights. * A 2-D tensor of shape [bw_num_units, input_size]. * * 22: The backward recurrent-to-input weights. Optional. * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size” * corresponds to either the number of cell units (i.e., “bw_num_units”), * or the second dimension of the “bw_projection_weights”, if defined. * * 23: The backward recurrent-to-forget weights. * A 2-D tensor of shape [bw_num_units, bw_output_size]. * * 24: The backward recurrent-to-cell weights. * A 2-D tensor of shape [bw_num_units, bw_output_size]. * * 25: The backward recurrent-to-output weights. * A 2-D tensor of shape [bw_num_units, bw_output_size]. * * 26: The backward cell-to-input weights. Optional. * A 1-D tensor of shape [bw_num_units]. * * 27: The backward cell-to-forget weights. Optional. * A 1-D tensor of shape [bw_num_units]. * * 28: The backward cell-to-output weights. Optional. * A 1-D tensor of shape [bw_num_units]. * * 29: The backward input gate bias. Optional. * A 1-D tensor of shape [bw_num_units]. * * 30: The backward forget gate bias. * A 1-D tensor of shape [bw_num_units]. * * 31: The backward cell gate bias. * A 1-D tensor of shape [bw_num_units]. * * 32: The backward output gate bias. * A 1-D tensor of shape [bw_num_units]. * * 33: The backward projection weights. Optional. 
* A 2-D tensor of shape [bw_output_size, bw_num_units]. * * 34: The backward projection bias. Optional. * A 1-D tensor of shape [bw_output_size]. * * 35: The forward input activation state. * A 2-D tensor of shape [batch_size, bw_output_size]. * * 36: The forward input cell state. * A 2-D tensor of shape [batch_size, bw_num_units]. * * 37: The backward input activation state. * A 2-D tensor of shape [batch_size, bw_output_size]. * * 38: The backward input cell state. * A 2-D tensor of shape [batch_size, bw_num_units]. * * 39: The auxiliary input. Optional. * A 3-D tensor of shape [max_time, batch_size, aux_input_size], * where “batch_size” corresponds to the batching dimension, and * “aux_input_size” is the size of the auxiliary input. Optional. See * the docs above for the usage modes explanation. * * 40: The forward auxiliary input-to-input weights. * Optional. See the docs above for the usage modes explanation. * A 2-D tensor of shape [fw_num_units, aux_input_size]. * * 41: The forward auxiliary input-to-forget weights. * Optional. See the docs above for the usage modes explanation. * A 2-D tensor of shape [fw_num_units, aux_input_size]. * * 42: The forward auxiliary input-to-cell weights. * Optional. See the docs above for the usage modes explanation. * A 2-D tensor of shape [fw_num_units, aux_input_size]. * * 43: The forward auxiliary input-to-output weights. * Optional. See the docs above for the usage modes explanation. * A 2-D tensor of shape [fw_num_units, aux_input_size]. * * 44: The backward auxiliary input-to-input weights. * Optional. See the docs above for the usage modes explanation. * A 2-D tensor of shape [bw_num_units, aux_input_size]. * * 45: The backward auxiliary input-to-forget weights. * Optional. See the docs above for the usage modes explanation. * A 2-D tensor of shape [bw_num_units, aux_input_size]. * * 46: The backward auxiliary input-to-cell weights. * Optional. See the docs above for the usage modes explanation. * A 2-D tensor of shape [bw_num_units, aux_input_size]. * * 47: The backward auxiliary input-to-output weights. * Optional. See the docs above for the usage modes explanation. * A 2-D tensor of shape [bw_num_units, aux_input_size]. * * 48: The activation function. * A value indicating the activation function: *
 * <ul>
 * <li>0: None;
 * <li>1: Relu;
 * <li>3: Relu6;
 * <li>4: Tanh;
 * <li>6: Sigmoid.
 * </ul>