------ ArmNN for Android NNAPI supported operations ------
This release of ArmNN for Android supports use as a driver for the Android Neural Networks API. It implements the
android.hardware.neuralnetworks@1.0, android.hardware.neuralnetworks@1.1, android.hardware.neuralnetworks@1.2 and
android.hardware.neuralnetworks@1.3 HAL interfaces.
For more information on the Android Neural Networks API, see https://developer.android.com/ndk/guides/neuralnetworks/index.html
For integration and usage documentation, please see README.md.
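As an illustration of how the driver is exercised (this is client code, not part of the driver itself), below is a
minimal sketch of an NNAPI application using the NDK C API: it builds a one-operation ADD model with FLOAT32
tensors, compiles it, and runs one inference. Whether the compiled model actually executes on ArmNN depends on
which drivers are registered on the device and on the NNAPI runtime's partitioning decisions. Everything below is
standard NNAPI; error checking is omitted for brevity.

/* Minimal NNAPI client: one ADD operation on FLOAT32 tensors.
 * Build with the Android NDK (API level 29+) and link -lneuralnetworks. */
#include <android/NeuralNetworks.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    ANeuralNetworksModel* model = NULL;
    ANeuralNetworksModel_create(&model);

    /* Two 1x2 FLOAT32 tensors in, one 1x2 FLOAT32 tensor out. */
    uint32_t dims[] = {1, 2};
    ANeuralNetworksOperandType tensorType = {
        .type = ANEURALNETWORKS_TENSOR_FLOAT32,
        .dimensionCount = 2, .dimensions = dims,
        .scale = 0.0f, .zeroPoint = 0,
    };
    ANeuralNetworksOperandType scalarType = {
        .type = ANEURALNETWORKS_INT32,
        .dimensionCount = 0, .dimensions = NULL,
        .scale = 0.0f, .zeroPoint = 0,
    };

    ANeuralNetworksModel_addOperand(model, &tensorType);  /* 0: input a   */
    ANeuralNetworksModel_addOperand(model, &tensorType);  /* 1: input b   */
    ANeuralNetworksModel_addOperand(model, &scalarType);  /* 2: fuse code */
    ANeuralNetworksModel_addOperand(model, &tensorType);  /* 3: output    */

    int32_t fuseNone = ANEURALNETWORKS_FUSED_NONE;
    ANeuralNetworksModel_setOperandValue(model, 2, &fuseNone, sizeof(fuseNone));

    /* ADD takes two tensors plus a fused-activation scalar. */
    uint32_t opInputs[]  = {0, 1, 2};
    uint32_t opOutputs[] = {3};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD,
                                      3, opInputs, 1, opOutputs);

    uint32_t modelInputs[] = {0, 1};
    ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs,
                                                  1, opOutputs);
    ANeuralNetworksModel_finish(model);

    /* The NNAPI runtime decides which registered driver (e.g. ArmNN)
     * executes this compilation. */
    ANeuralNetworksCompilation* compilation = NULL;
    ANeuralNetworksCompilation_create(model, &compilation);
    ANeuralNetworksCompilation_finish(compilation);

    float a[] = {1.0f, 2.0f}, b[] = {3.0f, 4.0f}, out[2] = {0};
    ANeuralNetworksExecution* execution = NULL;
    ANeuralNetworksExecution_create(compilation, &execution);
    ANeuralNetworksExecution_setInput(execution, 0, NULL, a, sizeof(a));
    ANeuralNetworksExecution_setInput(execution, 1, NULL, b, sizeof(b));
    ANeuralNetworksExecution_setOutput(execution, 0, NULL, out, sizeof(out));
    ANeuralNetworksExecution_compute(execution);  /* synchronous, API 29+ */

    printf("%f %f\n", out[0], out[1]);  /* expect 4.0 6.0 */

    ANeuralNetworksExecution_free(execution);
    ANeuralNetworksCompilation_free(compilation);
    ANeuralNetworksModel_free(model);
    return 0;
}
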
--- Support for Android Neural Networks HAL operations ---
The following AndroidNN HAL 1.0, 1.1, 1.2 and 1.3 operations are currently supported:
AndroidNN operator            Tensor types supported
ABS                           (FLOAT32)
ADD                           (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
ARGMAX                        (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
ARGMIN                        (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
AVERAGE_POOL_2D               (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
BATCH_TO_SPACE_ND             (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CONCATENATION                 (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CONV_2D                       (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTH_TO_SPACE                (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTHWISE_CONV_2D             (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEQUANTIZE                    (FLOAT32 (output only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (input only))
DIV                           (FLOAT32, QUANT8_ASYMM)
ELU                           (FLOAT32, QUANT8_ASYMM)
EQUAL                         (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
EXP                           (FLOAT32, FLOAT16)
EXPAND_DIMS                   (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
FILL                          (FLOAT32, FLOAT16, INT32)
FLOOR                         (FLOAT32)
FULLY_CONNECTED               (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GREATER                       (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GREATER_EQUAL                 (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GROUPED_CONV_2D               (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
HARD_SWISH                    (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
INSTANCE_NORMALIZATION        (FLOAT32)
L2_NORMALIZATION              (FLOAT32)
L2_POOL_2D                    (FLOAT32, QUANT8_ASYMM)
LESS                          (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LESS_EQUAL                    (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOCAL_RESPONSE_NORMALIZATION  (FLOAT32)
LOGICAL_AND                   (BOOL8)
LOGICAL_NOT                   (BOOL8)
LOGICAL_OR                    (BOOL8)
LOGISTIC                      (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOG_SOFTMAX                   (FLOAT32)
LSTM                          (FLOAT32)
MAXIMUM                       (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MAX_POOL_2D                   (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MEAN                          (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MINIMUM                       (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MUL                           (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
NEG                           (FLOAT32)
NOT_EQUAL                     (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PAD                           (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PAD_V2                        (FLOAT32, FLOAT16, QUANT8_ASYMM)
PRELU                         (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
QUANTIZE                      (FLOAT32 (input only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (output only))
QUANTIZED_16BIT_LSTM          (QUANT8_ASYMM)
QUANTIZED_LSTM                (QUANT8_ASYMM)
RELU                          (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RELU1                         (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RELU6                         (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESHAPE                       (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESIZE_BILINEAR               (FLOAT32, QUANT8_ASYMM)
RESIZE_NEAREST_NEIGHBOR       (FLOAT32, QUANT8_ASYMM)
RSQRT                         (FLOAT32)
SOFTMAX                       (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_BATCH_ND             (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_DEPTH                (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SQRT                          (FLOAT32)
SQUEEZE                       (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
STRIDED_SLICE                 (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SUB                           (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TANH                          (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE                     (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE_CONV_2D             (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
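
Many of the operations above take QUANT8_ASYMM tensors (unsigned 8-bit, zero point in [0, 255]) or, from HAL 1.3,
QUANT8_ASYMM_SIGNED tensors (signed 8-bit, zero point in [-128, 127]). Both use the standard NNAPI affine scheme
real = scale * (quantized - zeroPoint), which is also the arithmetic behind the QUANTIZE and DEQUANTIZE operations
in the table. A minimal sketch of that scheme follows; the scale and zero point values are illustrative only.

/* Affine quantization as used by QUANT8_ASYMM:
 * real = scale * (q - zeroPoint), q clamped to the uint8 range. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t quantize_u8(float real, float scale, int32_t zeroPoint) {
    long q = lroundf(real / scale) + zeroPoint;
    if (q < 0)   q = 0;    /* clamp to [0, 255] */
    if (q > 255) q = 255;
    return (uint8_t)q;
}

static float dequantize_u8(uint8_t q, float scale, int32_t zeroPoint) {
    return scale * (float)((int32_t)q - zeroPoint);
}

int main(void) {
    float scale = 0.5f;        /* illustrative parameters */
    int32_t zeroPoint = 128;
    uint8_t q = quantize_u8(-3.2f, scale, zeroPoint);           /* -> 122  */
    printf("q=%u real=%.2f\n", q, dequantize_u8(q, scale, zeroPoint)); /* -3.00 */
    return 0;
}

QUANT8_ASYMM_SIGNED works the same way with int8_t storage and clamping to [-128, 127].
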
Where operations are not supported by the ArmNN Android NN Driver, the driver reports this to the framework
and the framework falls back to its own CPU implementation for those operations.
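
This per-operation split is visible to applications through the standard NNAPI device APIs (API level 29 and
later): the runtime exposes each registered driver, such as ArmNN, as a device, and a finished model can be asked
which of its operations a given device claims to support. The sketch below uses only documented NNAPI calls; the
function name print_supported_ops and the opCount parameter are illustrative, not part of any API.

/* Sketch: for each NNAPI device, report which operations of a finished
 * model it supports. 'model' is assumed to be a finished
 * ANeuralNetworksModel containing opCount operations. API level 29+. */
#include <android/NeuralNetworks.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

void print_supported_ops(ANeuralNetworksModel* model, uint32_t opCount) {
    uint32_t deviceCount = 0;
    ANeuralNetworks_getDeviceCount(&deviceCount);

    for (uint32_t i = 0; i < deviceCount; ++i) {
        ANeuralNetworksDevice* device = NULL;
        ANeuralNetworks_getDevice(i, &device);

        const char* name = NULL;
        ANeuralNetworksDevice_getName(device, &name);

        bool supported[opCount];  /* one flag per operation in the model */
        const ANeuralNetworksDevice* devices[1] = { device };
        ANeuralNetworksModel_getSupportedOperationsForDevices(
            model, devices, 1, supported);

        for (uint32_t op = 0; op < opCount; ++op) {
            printf("device %s: operation %u %s\n", name, op,
                   supported[op] ? "supported" : "falls back to CPU");
        }
    }
}
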
NOTE: By convention, the list above includes only those tensor types that are fully supported across all
ArmNN backends.
- FLOAT16 input tensors are partially supported by most HAL 1.2 operators on the GpuAcc and
  CpuRef backends, but not on CpuAcc.