Use run-time feature detection for Neon compound convolve x
The Arm Neon DotProd and I8MM implementations of
av1_dist_wtd_convolve_x currently have to be enabled at compile time,
since they are guarded by #ifdef feature macros. Now that run-time
feature detection has been enabled for Arm platforms, expose these
implementations with distinct *_neon_dotprod/*_neon_i8mm names in
separate files and wire them up to CMake and rtcd.pl. Also add test
cases for the new DotProd and I8MM functions.
Change-Id: I5df949a9f00a443430422a3da0f3d4b522eefad6
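
For context, run-time dispatch in libaom works through function
pointers that rtcd.pl-generated code assigns once the CPU feature
flags are known. The following is a minimal, self-contained C sketch
of that pattern; the flag values and local names are illustrative,
not the actual generated code:

    /* Sketch of rtcd-style run-time dispatch among the C, Neon, Neon
     * DotProd and Neon I8MM variants. Flag and function names mirror
     * the libaom convention but are declared locally so this compiles
     * standalone. */
    #include <stdio.h>

    #define HAS_NEON (1 << 0)
    #define HAS_NEON_DOTPROD (1 << 1) /* illustrative flag values */
    #define HAS_NEON_I8MM (1 << 2)

    typedef void (*convolve_x_fn)(void);

    static void convolve_x_c(void) { puts("c"); }
    static void convolve_x_neon(void) { puts("neon"); }
    static void convolve_x_neon_dotprod(void) { puts("neon_dotprod"); }
    static void convolve_x_neon_i8mm(void) { puts("neon_i8mm"); }

    static convolve_x_fn av1_dist_wtd_convolve_x;

    static void setup_rtcd(int flags) {
      av1_dist_wtd_convolve_x = convolve_x_c; /* portable fallback */
      if (flags & HAS_NEON) av1_dist_wtd_convolve_x = convolve_x_neon;
      if (flags & HAS_NEON_DOTPROD)
        av1_dist_wtd_convolve_x = convolve_x_neon_dotprod;
      if (flags & HAS_NEON_I8MM)
        av1_dist_wtd_convolve_x = convolve_x_neon_i8mm;
    }

    int main(void) {
      setup_rtcd(HAS_NEON | HAS_NEON_DOTPROD);
      av1_dist_wtd_convolve_x(); /* prints "neon_dotprod" */
      return 0;
    }
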
diff --git a/av1/av1.cmake b/av1/av1.cmake
index 717c197..6d6da1d 100644
--- a/av1/av1.cmake
+++ b/av1/av1.cmake
@@ -390,6 +390,12 @@
"${AOM_ROOT}/av1/common/arm/warp_plane_neon.c"
"${AOM_ROOT}/av1/common/arm/wiener_convolve_neon.c")
+list(APPEND AOM_AV1_COMMON_INTRIN_NEON_DOTPROD
+ "${AOM_ROOT}/av1/common/arm/compound_convolve_neon_dotprod.c")
+
+list(APPEND AOM_AV1_COMMON_INTRIN_NEON_I8MM
+ "${AOM_ROOT}/av1/common/arm/compound_convolve_neon_i8mm.c")
+
list(APPEND AOM_AV1_ENCODER_INTRIN_SSE4_2
"${AOM_ROOT}/av1/encoder/x86/hash_sse42.c")
@@ -640,6 +646,22 @@
"AOM_AV1_COMMON_INTRIN_NEON")
endif()
+ if(HAVE_NEON_DOTPROD)
+ if(AOM_AV1_COMMON_INTRIN_NEON_DOTPROD)
+ add_intrinsics_object_library("${AOM_NEON_DOTPROD_FLAG}" "neon_dotprod"
+ "aom_av1_common"
+ "AOM_AV1_COMMON_INTRIN_NEON_DOTPROD")
+ endif()
+ endif()
+
+ if(HAVE_NEON_I8MM)
+ if(AOM_AV1_COMMON_INTRIN_NEON_I8MM)
+ add_intrinsics_object_library("${AOM_NEON_I8MM_FLAG}" "neon_i8mm"
+ "aom_av1_common"
+ "AOM_AV1_COMMON_INTRIN_NEON_I8MM")
+ endif()
+ endif()
+
if(CONFIG_AV1_ENCODER)
if(AOM_AV1_ENCODER_INTRIN_NEON)
add_intrinsics_object_library("${AOM_NEON_INTRIN_FLAG}" "neon"
diff --git a/av1/common/arm/compound_convolve_neon.c b/av1/common/arm/compound_convolve_neon.c
index effa67b..63f0a57 100644
--- a/av1/common/arm/compound_convolve_neon.c
+++ b/av1/common/arm/compound_convolve_neon.c
@@ -12,15 +12,12 @@
#include <arm_neon.h>
#include <assert.h>
-#include "config/aom_config.h"
-#include "config/av1_rtcd.h"
-
-#include "aom_dsp/txfm_common.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
-#include "aom_ports/mem.h"
-#include "av1/common/common.h"
+#include "av1/common/arm/compound_convolve_neon.h"
#include "av1/common/arm/convolve_neon.h"
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
#if !AOM_ARCH_AARCH64
static INLINE void compute_dist_wtd_avg_4x1(uint16x4_t dd0, uint16x4_t d0,
@@ -82,127 +79,6 @@
#endif // !AOM_ARCH_AARCH64
-static INLINE void compute_dist_wtd_avg_4x4(
- uint16x4_t dd0, uint16x4_t dd1, uint16x4_t dd2, uint16x4_t dd3,
- uint16x4_t d0, uint16x4_t d1, uint16x4_t d2, uint16x4_t d3,
- const uint16_t fwd_offset, const uint16_t bck_offset,
- const int16x8_t round_offset, uint8x8_t *d01_u8, uint8x8_t *d23_u8) {
- uint32x4_t blend0 = vmull_n_u16(dd0, fwd_offset);
- blend0 = vmlal_n_u16(blend0, d0, bck_offset);
- uint32x4_t blend1 = vmull_n_u16(dd1, fwd_offset);
- blend1 = vmlal_n_u16(blend1, d1, bck_offset);
- uint32x4_t blend2 = vmull_n_u16(dd2, fwd_offset);
- blend2 = vmlal_n_u16(blend2, d2, bck_offset);
- uint32x4_t blend3 = vmull_n_u16(dd3, fwd_offset);
- blend3 = vmlal_n_u16(blend3, d3, bck_offset);
-
- uint16x4_t avg0 = vshrn_n_u32(blend0, DIST_PRECISION_BITS);
- uint16x4_t avg1 = vshrn_n_u32(blend1, DIST_PRECISION_BITS);
- uint16x4_t avg2 = vshrn_n_u32(blend2, DIST_PRECISION_BITS);
- uint16x4_t avg3 = vshrn_n_u32(blend3, DIST_PRECISION_BITS);
-
- int16x8_t dst_01 = vreinterpretq_s16_u16(vcombine_u16(avg0, avg1));
- int16x8_t dst_23 = vreinterpretq_s16_u16(vcombine_u16(avg2, avg3));
-
- dst_01 = vsubq_s16(dst_01, round_offset);
- dst_23 = vsubq_s16(dst_23, round_offset);
-
- *d01_u8 = vqrshrun_n_s16(dst_01, FILTER_BITS - ROUND0_BITS);
- *d23_u8 = vqrshrun_n_s16(dst_23, FILTER_BITS - ROUND0_BITS);
-}
-
-static INLINE void compute_basic_avg_4x4(uint16x4_t dd0, uint16x4_t dd1,
- uint16x4_t dd2, uint16x4_t dd3,
- uint16x4_t d0, uint16x4_t d1,
- uint16x4_t d2, uint16x4_t d3,
- const int16x8_t round_offset,
- uint8x8_t *d01_u8, uint8x8_t *d23_u8) {
- uint16x4_t avg0 = vhadd_u16(dd0, d0);
- uint16x4_t avg1 = vhadd_u16(dd1, d1);
- uint16x4_t avg2 = vhadd_u16(dd2, d2);
- uint16x4_t avg3 = vhadd_u16(dd3, d3);
-
- int16x8_t dst_01 = vreinterpretq_s16_u16(vcombine_u16(avg0, avg1));
- int16x8_t dst_23 = vreinterpretq_s16_u16(vcombine_u16(avg2, avg3));
-
- dst_01 = vsubq_s16(dst_01, round_offset);
- dst_23 = vsubq_s16(dst_23, round_offset);
-
- *d01_u8 = vqrshrun_n_s16(dst_01, FILTER_BITS - ROUND0_BITS);
- *d23_u8 = vqrshrun_n_s16(dst_23, FILTER_BITS - ROUND0_BITS);
-}
-
-static INLINE void compute_dist_wtd_avg_8x4(
- uint16x8_t dd0, uint16x8_t dd1, uint16x8_t dd2, uint16x8_t dd3,
- uint16x8_t d0, uint16x8_t d1, uint16x8_t d2, uint16x8_t d3,
- const uint16_t fwd_offset, const uint16_t bck_offset,
- const int16x8_t round_offset, uint8x8_t *d0_u8, uint8x8_t *d1_u8,
- uint8x8_t *d2_u8, uint8x8_t *d3_u8) {
- uint32x4_t blend0_lo = vmull_n_u16(vget_low_u16(dd0), fwd_offset);
- blend0_lo = vmlal_n_u16(blend0_lo, vget_low_u16(d0), bck_offset);
- uint32x4_t blend0_hi = vmull_n_u16(vget_high_u16(dd0), fwd_offset);
- blend0_hi = vmlal_n_u16(blend0_hi, vget_high_u16(d0), bck_offset);
-
- uint32x4_t blend1_lo = vmull_n_u16(vget_low_u16(dd1), fwd_offset);
- blend1_lo = vmlal_n_u16(blend1_lo, vget_low_u16(d1), bck_offset);
- uint32x4_t blend1_hi = vmull_n_u16(vget_high_u16(dd1), fwd_offset);
- blend1_hi = vmlal_n_u16(blend1_hi, vget_high_u16(d1), bck_offset);
-
- uint32x4_t blend2_lo = vmull_n_u16(vget_low_u16(dd2), fwd_offset);
- blend2_lo = vmlal_n_u16(blend2_lo, vget_low_u16(d2), bck_offset);
- uint32x4_t blend2_hi = vmull_n_u16(vget_high_u16(dd2), fwd_offset);
- blend2_hi = vmlal_n_u16(blend2_hi, vget_high_u16(d2), bck_offset);
-
- uint32x4_t blend3_lo = vmull_n_u16(vget_low_u16(dd3), fwd_offset);
- blend3_lo = vmlal_n_u16(blend3_lo, vget_low_u16(d3), bck_offset);
- uint32x4_t blend3_hi = vmull_n_u16(vget_high_u16(dd3), fwd_offset);
- blend3_hi = vmlal_n_u16(blend3_hi, vget_high_u16(d3), bck_offset);
-
- uint16x8_t avg0 = vcombine_u16(vshrn_n_u32(blend0_lo, DIST_PRECISION_BITS),
- vshrn_n_u32(blend0_hi, DIST_PRECISION_BITS));
- uint16x8_t avg1 = vcombine_u16(vshrn_n_u32(blend1_lo, DIST_PRECISION_BITS),
- vshrn_n_u32(blend1_hi, DIST_PRECISION_BITS));
- uint16x8_t avg2 = vcombine_u16(vshrn_n_u32(blend2_lo, DIST_PRECISION_BITS),
- vshrn_n_u32(blend2_hi, DIST_PRECISION_BITS));
- uint16x8_t avg3 = vcombine_u16(vshrn_n_u32(blend3_lo, DIST_PRECISION_BITS),
- vshrn_n_u32(blend3_hi, DIST_PRECISION_BITS));
-
- int16x8_t dst0 = vsubq_s16(vreinterpretq_s16_u16(avg0), round_offset);
- int16x8_t dst1 = vsubq_s16(vreinterpretq_s16_u16(avg1), round_offset);
- int16x8_t dst2 = vsubq_s16(vreinterpretq_s16_u16(avg2), round_offset);
- int16x8_t dst3 = vsubq_s16(vreinterpretq_s16_u16(avg3), round_offset);
-
- *d0_u8 = vqrshrun_n_s16(dst0, FILTER_BITS - ROUND0_BITS);
- *d1_u8 = vqrshrun_n_s16(dst1, FILTER_BITS - ROUND0_BITS);
- *d2_u8 = vqrshrun_n_s16(dst2, FILTER_BITS - ROUND0_BITS);
- *d3_u8 = vqrshrun_n_s16(dst3, FILTER_BITS - ROUND0_BITS);
-}
-
-static INLINE void compute_basic_avg_8x4(uint16x8_t dd0, uint16x8_t dd1,
- uint16x8_t dd2, uint16x8_t dd3,
- uint16x8_t d0, uint16x8_t d1,
- uint16x8_t d2, uint16x8_t d3,
- const int16x8_t round_offset,
- uint8x8_t *d0_u8, uint8x8_t *d1_u8,
- uint8x8_t *d2_u8, uint8x8_t *d3_u8) {
- uint16x8_t avg0, avg1, avg2, avg3;
-
- avg0 = vhaddq_u16(dd0, d0);
- avg1 = vhaddq_u16(dd1, d1);
- avg2 = vhaddq_u16(dd2, d2);
- avg3 = vhaddq_u16(dd3, d3);
-
- int16x8_t dst0 = vsubq_s16(vreinterpretq_s16_u16(avg0), round_offset);
- int16x8_t dst1 = vsubq_s16(vreinterpretq_s16_u16(avg1), round_offset);
- int16x8_t dst2 = vsubq_s16(vreinterpretq_s16_u16(avg2), round_offset);
- int16x8_t dst3 = vsubq_s16(vreinterpretq_s16_u16(avg3), round_offset);
-
- *d0_u8 = vqrshrun_n_s16(dst0, FILTER_BITS - ROUND0_BITS);
- *d1_u8 = vqrshrun_n_s16(dst1, FILTER_BITS - ROUND0_BITS);
- *d2_u8 = vqrshrun_n_s16(dst2, FILTER_BITS - ROUND0_BITS);
- *d3_u8 = vqrshrun_n_s16(dst3, FILTER_BITS - ROUND0_BITS);
-}
-
#if AOM_ARCH_AARCH64 && defined(__ARM_FEATURE_MATMUL_INT8)
static INLINE int16x4_t convolve4_4_2d_h(uint8x16_t samples,
@@ -2081,780 +1957,6 @@
}
}
-#if AOM_ARCH_AARCH64 && defined(__ARM_FEATURE_MATMUL_INT8)
-
-static INLINE uint16x4_t convolve4_4_x(uint8x16_t samples,
- const int8x8_t x_filter,
- const uint8x16_t permute_tbl,
- const int32x4_t round_offset) {
- // Permute samples ready for dot product.
- // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
- uint8x16_t permuted_samples = vqtbl1q_u8(samples, permute_tbl);
-
- // First 4 output values.
- int32x4_t sum = vusdotq_lane_s32(round_offset, permuted_samples, x_filter, 0);
-
- // We halved the convolution filter values so -1 from the right shift.
- return vreinterpret_u16_s16(vshrn_n_s32(sum, ROUND0_BITS - 1));
-}
-
-static INLINE uint16x8_t convolve8_8_x(uint8x16_t samples,
- const int8x8_t x_filter,
- const uint8x16x3_t permute_tbl,
- const int32x4_t round_offset) {
- uint8x16_t permuted_samples[3];
- int32x4_t sum[2];
-
- // Permute samples ready for dot product.
- // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
- permuted_samples[0] = vqtbl1q_u8(samples, permute_tbl.val[0]);
- // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
- permuted_samples[1] = vqtbl1q_u8(samples, permute_tbl.val[1]);
- // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
- permuted_samples[2] = vqtbl1q_u8(samples, permute_tbl.val[2]);
-
- // First 4 output values.
- sum[0] = vusdotq_lane_s32(round_offset, permuted_samples[0], x_filter, 0);
- sum[0] = vusdotq_lane_s32(sum[0], permuted_samples[1], x_filter, 1);
- // Second 4 output values.
- sum[1] = vusdotq_lane_s32(round_offset, permuted_samples[1], x_filter, 0);
- sum[1] = vusdotq_lane_s32(sum[1], permuted_samples[2], x_filter, 1);
-
- // Narrow and re-pack.
- // We halved the convolution filter values so -1 from the right shift.
- int16x8_t res = vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1),
- vshrn_n_s32(sum[1], ROUND0_BITS - 1));
- return vreinterpretq_u16_s16(res);
-}
-
-static INLINE void dist_wtd_convolve_x_dist_wtd_avg_neon(
- const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
- int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
- ConvolveParams *conv_params) {
- assert(w % 4 == 0);
- assert(h % 4 == 0);
-
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
- (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
- const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
- // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding
- // shifts - which are generally faster than rounding shifts on modern CPUs.
- // (The extra -1 is needed because we halved the filter values.)
- const int32x4_t round_offset_shim = vdupq_n_s32(
- (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1)));
-
- const uint16_t fwd_offset = conv_params->fwd_offset;
- const uint16_t bck_offset = conv_params->bck_offset;
-
- // Horizontal filter.
- const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
-
- const int horiz_offset = filter_params_x->taps / 2 - 1;
- const uint8_t *src_ptr = src - horiz_offset;
- CONV_BUF_TYPE *dst_ptr = conv_params->dst;
- uint8_t *dst8_ptr = dst8;
- int dst_stride = conv_params->dst_stride;
- int height = h;
-
- if (w == 4) {
- const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
- // 4-tap filters are used for blocks having width <= 4.
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter =
- vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
-
- src_ptr += 2;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x4_t d0 =
- convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d1 =
- convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d2 =
- convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d3 =
- convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim);
-
- uint16x4_t dd0, dd1, dd2, dd3;
- load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3);
-
- uint8x8_t d01_u8, d23_u8;
- compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
- bck_offset, round_offset_vec, &d01_u8, &d23_u8);
-
- store_u8_4x1(dst8_ptr + 0 * dst8_stride, d01_u8, 0);
- store_u8_4x1(dst8_ptr + 1 * dst8_stride, d01_u8, 1);
- store_u8_4x1(dst8_ptr + 2 * dst8_stride, d23_u8, 0);
- store_u8_4x1(dst8_ptr + 3 * dst8_stride, d23_u8, 1);
-
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- dst8_ptr += 4 * dst8_stride;
- height -= 4;
- } while (height != 0);
- } else {
- const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1);
-
- do {
- const uint8_t *s = src_ptr;
- CONV_BUF_TYPE *d = dst_ptr;
- uint8_t *d_u8 = dst8_ptr;
- int width = w;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x8_t d0 =
- convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d1 =
- convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d2 =
- convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d3 =
- convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim);
-
- uint16x8_t dd0, dd1, dd2, dd3;
- load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
-
- uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8;
- compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
- bck_offset, round_offset_vec, &d0_u8, &d1_u8,
- &d2_u8, &d3_u8);
-
- store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
-
- s += 8;
- d += 8;
- d_u8 += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- dst8_ptr += 4 * dst8_stride;
- height -= 4;
- } while (height != 0);
- }
-}
-
-static INLINE void dist_wtd_convolve_x_avg_neon(
- const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
- int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
- ConvolveParams *conv_params) {
- assert(w % 4 == 0);
- assert(h % 4 == 0);
-
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
- (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
- const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
- // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding
- // shifts - which are generally faster than rounding shifts on modern CPUs.
- // (The extra -1 is needed because we halved the filter values.)
- const int32x4_t round_offset_shim = vdupq_n_s32(
- (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1)));
-
- // Horizontal filter.
- const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
-
- const int horiz_offset = filter_params_x->taps / 2 - 1;
- const uint8_t *src_ptr = src - horiz_offset;
- CONV_BUF_TYPE *dst_ptr = conv_params->dst;
- uint8_t *dst8_ptr = dst8;
- int dst_stride = conv_params->dst_stride;
- int height = h;
-
- if (w == 4) {
- const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
- // 4-tap filters are used for blocks having width <= 4.
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter =
- vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
-
- src_ptr += 2;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x4_t d0 =
- convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d1 =
- convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d2 =
- convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d3 =
- convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim);
-
- uint16x4_t dd0, dd1, dd2, dd3;
- load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3);
-
- uint8x8_t d01_u8, d23_u8;
- compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3,
- round_offset_vec, &d01_u8, &d23_u8);
-
- store_u8_4x1(dst8_ptr + 0 * dst8_stride, d01_u8, 0);
- store_u8_4x1(dst8_ptr + 1 * dst8_stride, d01_u8, 1);
- store_u8_4x1(dst8_ptr + 2 * dst8_stride, d23_u8, 0);
- store_u8_4x1(dst8_ptr + 3 * dst8_stride, d23_u8, 1);
-
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- dst8_ptr += 4 * dst8_stride;
- height -= 4;
- } while (height != 0);
- } else {
- const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1);
-
- do {
- const uint8_t *s = src_ptr;
- CONV_BUF_TYPE *d = dst_ptr;
- uint8_t *d_u8 = dst8_ptr;
- int width = w;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x8_t d0 =
- convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d1 =
- convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d2 =
- convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d3 =
- convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim);
-
- uint16x8_t dd0, dd1, dd2, dd3;
- load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
-
- uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8;
- compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3,
- round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8);
-
- store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
-
- s += 8;
- d += 8;
- d_u8 += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- dst8_ptr += 4 * dst8_stride;
- height -= 4;
- } while (height != 0);
- }
-}
-
-static INLINE void dist_wtd_convolve_x_neon(
- const uint8_t *src, int src_stride, int w, int h,
- const InterpFilterParams *filter_params_x, const int subpel_x_qn,
- ConvolveParams *conv_params) {
- assert(w % 4 == 0);
- assert(h % 4 == 0);
-
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
- (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
- // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding
- // shifts - which are generally faster than rounding shifts on modern CPUs.
- // (The extra -1 is needed because we halved the filter values.)
- const int32x4_t round_offset_shim = vdupq_n_s32(
- (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1)));
-
- // Horizontal filter.
- const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
-
- const int horiz_offset = filter_params_x->taps / 2 - 1;
- const uint8_t *src_ptr = src - horiz_offset;
- CONV_BUF_TYPE *dst_ptr = conv_params->dst;
- int dst_stride = conv_params->dst_stride;
- int height = h;
-
- if (w == 4) {
- const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
- // 4-tap filters are used for blocks having width <= 4.
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter =
- vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
-
- src_ptr += 2;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x4_t d0 =
- convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d1 =
- convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d2 =
- convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim);
- uint16x4_t d3 =
- convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim);
-
- store_u16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3);
-
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- height -= 4;
- } while (height != 0);
- } else {
- const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1);
-
- do {
- const uint8_t *s = src_ptr;
- CONV_BUF_TYPE *d = dst_ptr;
- int width = w;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x8_t d0 =
- convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d1 =
- convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d2 =
- convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim);
- uint16x8_t d3 =
- convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim);
-
- store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
-
- s += 8;
- d += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- height -= 4;
- } while (height != 0);
- }
-}
-
-#elif AOM_ARCH_AARCH64 && defined(__ARM_FEATURE_DOTPROD)
-
-static INLINE uint16x4_t convolve4_4_x(uint8x16_t samples,
- const int8x8_t x_filter,
- const int32x4_t correction,
- const uint8x16_t range_limit,
- const uint8x16_t permute_tbl) {
- // Clamp sample range to [-128, 127] for 8-bit signed dot product.
- int8x16_t clamped_samples =
- vreinterpretq_s8_u8(vsubq_u8(samples, range_limit));
-
- // Permute samples ready for dot product.
- // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
- int8x16_t permuted_samples = vqtbl1q_s8(clamped_samples, permute_tbl);
-
- // Accumulate dot product into 'correction' to account for range clamp.
- int32x4_t sum = vdotq_lane_s32(correction, permuted_samples, x_filter, 0);
-
- // We halved the convolution filter values so -1 from the right shift.
- return vreinterpret_u16_s16(vshrn_n_s32(sum, ROUND0_BITS - 1));
-}
-
-static INLINE uint16x8_t convolve8_8_x(uint8x16_t samples,
- const int8x8_t x_filter,
- const int32x4_t correction,
- const uint8x16_t range_limit,
- const uint8x16x3_t permute_tbl) {
- int8x16_t clamped_samples, permuted_samples[3];
- int32x4_t sum[2];
-
- // Clamp sample range to [-128, 127] for 8-bit signed dot product.
- clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit));
-
- // Permute samples ready for dot product. */
- // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
- permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]);
- // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
- permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]);
- // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
- permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]);
-
- // Accumulate dot product into 'correction' to account for range clamp.
- // First 4 output values.
- sum[0] = vdotq_lane_s32(correction, permuted_samples[0], x_filter, 0);
- sum[0] = vdotq_lane_s32(sum[0], permuted_samples[1], x_filter, 1);
- // Second 4 output values.
- sum[1] = vdotq_lane_s32(correction, permuted_samples[1], x_filter, 0);
- sum[1] = vdotq_lane_s32(sum[1], permuted_samples[2], x_filter, 1);
-
- // Narrow and re-pack.
- // We halved the convolution filter values so -1 from the right shift.
- int16x8_t res = vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1),
- vshrn_n_s32(sum[1], ROUND0_BITS - 1));
- return vreinterpretq_u16_s16(res);
-}
-
-static INLINE void dist_wtd_convolve_x_dist_wtd_avg_neon(
- const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
- int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
- ConvolveParams *conv_params) {
- assert(w % 4 == 0);
- assert(h % 4 == 0);
-
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
- (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
- const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
-
- const uint16_t fwd_offset = conv_params->fwd_offset;
- const uint16_t bck_offset = conv_params->bck_offset;
-
- // Horizontal filter.
- const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
- const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr);
-
- // Dot-product constants and other shims.
- const uint8x16_t range_limit = vdupq_n_u8(128);
- const int32_t correction_s32 =
- vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1));
- // Fold round_offset into the dot-product filter correction constant. The
- // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-
- // rounding shifts - which are generally faster than rounding shifts on
- // modern CPUs. (The extra -1 is needed because we halved the filter values.)
- int32x4_t correction =
- vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) +
- (1 << ((ROUND0_BITS - 1) - 1)));
-
- const int horiz_offset = filter_params_x->taps / 2 - 1;
- const uint8_t *src_ptr = src - horiz_offset;
- CONV_BUF_TYPE *dst_ptr = conv_params->dst;
- uint8_t *dst8_ptr = dst8;
- int dst_stride = conv_params->dst_stride;
- int height = h;
-
- if (w == 4) {
- const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
- // 4-tap filters are used for blocks having width <= 4.
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter =
- vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
-
- src_ptr += 2;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x4_t d0 =
- convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d1 =
- convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d2 =
- convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d3 =
- convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl);
-
- uint16x4_t dd0, dd1, dd2, dd3;
- load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3);
-
- uint8x8_t d01_u8, d23_u8;
- compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
- bck_offset, round_offset_vec, &d01_u8, &d23_u8);
-
- store_u8_4x1(dst8_ptr + 0 * dst8_stride, d01_u8, 0);
- store_u8_4x1(dst8_ptr + 1 * dst8_stride, d01_u8, 1);
- store_u8_4x1(dst8_ptr + 2 * dst8_stride, d23_u8, 0);
- store_u8_4x1(dst8_ptr + 3 * dst8_stride, d23_u8, 1);
-
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- dst8_ptr += 4 * dst8_stride;
- height -= 4;
- } while (height != 0);
- } else {
- const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1);
-
- do {
- const uint8_t *s = src_ptr;
- CONV_BUF_TYPE *d = dst_ptr;
- uint8_t *d_u8 = dst8_ptr;
- int width = w;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x8_t d0 =
- convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d1 =
- convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d2 =
- convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d3 =
- convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl);
-
- uint16x8_t dd0, dd1, dd2, dd3;
- load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
-
- uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8;
- compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
- bck_offset, round_offset_vec, &d0_u8, &d1_u8,
- &d2_u8, &d3_u8);
-
- store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
-
- s += 8;
- d += 8;
- d_u8 += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- dst8_ptr += 4 * dst8_stride;
- height -= 4;
- } while (height != 0);
- }
-}
-
-static INLINE void dist_wtd_convolve_x_avg_neon(
- const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
- int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
- ConvolveParams *conv_params) {
- assert(w % 4 == 0);
- assert(h % 4 == 0);
-
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
- (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
- const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
-
- // Horizontal filter.
- const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
- const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr);
-
- // Dot-product constants and other shims.
- const uint8x16_t range_limit = vdupq_n_u8(128);
- const int32_t correction_s32 =
- vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1));
- // Fold round_offset into the dot-product filter correction constant. The
- // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-
- // rounding shifts - which are generally faster than rounding shifts on
- // modern CPUs. (The extra -1 is needed because we halved the filter values.)
- int32x4_t correction =
- vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) +
- (1 << ((ROUND0_BITS - 1) - 1)));
-
- const int horiz_offset = filter_params_x->taps / 2 - 1;
- const uint8_t *src_ptr = src - horiz_offset;
- CONV_BUF_TYPE *dst_ptr = conv_params->dst;
- uint8_t *dst8_ptr = dst8;
- int dst_stride = conv_params->dst_stride;
- int height = h;
-
- if (w == 4) {
- const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
- // 4-tap filters are used for blocks having width <= 4.
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter =
- vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
-
- src_ptr += 2;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x4_t d0 =
- convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d1 =
- convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d2 =
- convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d3 =
- convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl);
-
- uint16x4_t dd0, dd1, dd2, dd3;
- load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3);
-
- uint8x8_t d01_u8, d23_u8;
- compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3,
- round_offset_vec, &d01_u8, &d23_u8);
-
- store_u8_4x1(dst8_ptr + 0 * dst8_stride, d01_u8, 0);
- store_u8_4x1(dst8_ptr + 1 * dst8_stride, d01_u8, 1);
- store_u8_4x1(dst8_ptr + 2 * dst8_stride, d23_u8, 0);
- store_u8_4x1(dst8_ptr + 3 * dst8_stride, d23_u8, 1);
-
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- dst8_ptr += 4 * dst8_stride;
- height -= 4;
- } while (height != 0);
- } else {
- const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1);
-
- do {
- const uint8_t *s = src_ptr;
- CONV_BUF_TYPE *d = dst_ptr;
- uint8_t *d_u8 = dst8_ptr;
- int width = w;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x8_t d0 =
- convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d1 =
- convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d2 =
- convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d3 =
- convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl);
-
- uint16x8_t dd0, dd1, dd2, dd3;
- load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
-
- uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8;
- compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3,
- round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8);
-
- store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
-
- s += 8;
- d += 8;
- d_u8 += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- dst8_ptr += 4 * dst8_stride;
- height -= 4;
- } while (height != 0);
- }
-}
-
-static INLINE void dist_wtd_convolve_x_neon(
- const uint8_t *src, int src_stride, int w, int h,
- const InterpFilterParams *filter_params_x, const int subpel_x_qn,
- ConvolveParams *conv_params) {
- assert(w % 4 == 0);
- assert(h % 4 == 0);
-
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
- (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
-
- // Horizontal filter.
- const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
- filter_params_x, subpel_x_qn & SUBPEL_MASK);
- const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr);
-
- // Dot-product constants and other shims.
- const uint8x16_t range_limit = vdupq_n_u8(128);
- const int32_t correction_s32 =
- vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1));
- // Fold round_offset into the dot-product filter correction constant. The
- // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-
- // rounding shifts - which are generally faster than rounding shifts on
- // modern CPUs. (The extra -1 is needed because we halved the filter values.)
- int32x4_t correction =
- vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) +
- (1 << ((ROUND0_BITS - 1) - 1)));
-
- const int horiz_offset = filter_params_x->taps / 2 - 1;
- const uint8_t *src_ptr = src - horiz_offset;
- CONV_BUF_TYPE *dst_ptr = conv_params->dst;
- int dst_stride = conv_params->dst_stride;
- int height = h;
-
- if (w == 4) {
- const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
- // 4-tap filters are used for blocks having width <= 4.
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter =
- vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
-
- src_ptr += 2;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x4_t d0 =
- convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d1 =
- convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d2 =
- convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl);
- uint16x4_t d3 =
- convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl);
-
- store_u16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3);
-
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- height -= 4;
- } while (height != 0);
- } else {
- const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
- // Filter values are even, so halve to reduce intermediate precision reqs.
- const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1);
-
- do {
- const uint8_t *s = src_ptr;
- CONV_BUF_TYPE *d = dst_ptr;
- int width = w;
-
- do {
- uint8x16_t s0, s1, s2, s3;
- load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
-
- uint16x8_t d0 =
- convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d1 =
- convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d2 =
- convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl);
- uint16x8_t d3 =
- convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl);
-
- store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
-
- s += 8;
- d += 8;
- width -= 8;
- } while (width != 0);
- src_ptr += 4 * src_stride;
- dst_ptr += 4 * dst_stride;
- height -= 4;
- } while (height != 0);
- }
-}
-
-#else // !(AOM_ARCH_AARCH64 && defined(__ARM_FEATURE_DOTPROD))
-
static INLINE uint16x4_t convolve4_4_x(const int16x4_t s0, const int16x4_t s1,
const int16x4_t s2, const int16x4_t s3,
const int16x4_t x_filter,
@@ -3931,8 +3033,6 @@
}
}
-#endif // AOM_ARCH_AARCH64 && defined(__ARM_FEATURE_DOTPROD)
-
void av1_dist_wtd_convolve_x_neon(const uint8_t *src, int src_stride,
uint8_t *dst8, int dst8_stride, int w, int h,
const InterpFilterParams *filter_params_x,
diff --git a/av1/common/arm/compound_convolve_neon.h b/av1/common/arm/compound_convolve_neon.h
new file mode 100644
index 0000000..4dbd827
--- /dev/null
+++ b/av1/common/arm/compound_convolve_neon.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "av1/common/convolve.h"
+#include "av1/common/enums.h"
+#include "av1/common/filter.h"
+
+static INLINE void compute_dist_wtd_avg_4x4(
+ uint16x4_t dd0, uint16x4_t dd1, uint16x4_t dd2, uint16x4_t dd3,
+ uint16x4_t d0, uint16x4_t d1, uint16x4_t d2, uint16x4_t d3,
+ const uint16_t fwd_offset, const uint16_t bck_offset,
+ const int16x8_t round_offset, uint8x8_t *d01_u8, uint8x8_t *d23_u8) {
+ uint32x4_t blend0 = vmull_n_u16(dd0, fwd_offset);
+ blend0 = vmlal_n_u16(blend0, d0, bck_offset);
+ uint32x4_t blend1 = vmull_n_u16(dd1, fwd_offset);
+ blend1 = vmlal_n_u16(blend1, d1, bck_offset);
+ uint32x4_t blend2 = vmull_n_u16(dd2, fwd_offset);
+ blend2 = vmlal_n_u16(blend2, d2, bck_offset);
+ uint32x4_t blend3 = vmull_n_u16(dd3, fwd_offset);
+ blend3 = vmlal_n_u16(blend3, d3, bck_offset);
+
+ uint16x4_t avg0 = vshrn_n_u32(blend0, DIST_PRECISION_BITS);
+ uint16x4_t avg1 = vshrn_n_u32(blend1, DIST_PRECISION_BITS);
+ uint16x4_t avg2 = vshrn_n_u32(blend2, DIST_PRECISION_BITS);
+ uint16x4_t avg3 = vshrn_n_u32(blend3, DIST_PRECISION_BITS);
+
+ int16x8_t dst_01 = vreinterpretq_s16_u16(vcombine_u16(avg0, avg1));
+ int16x8_t dst_23 = vreinterpretq_s16_u16(vcombine_u16(avg2, avg3));
+
+ dst_01 = vsubq_s16(dst_01, round_offset);
+ dst_23 = vsubq_s16(dst_23, round_offset);
+
+ *d01_u8 = vqrshrun_n_s16(dst_01, FILTER_BITS - ROUND0_BITS);
+ *d23_u8 = vqrshrun_n_s16(dst_23, FILTER_BITS - ROUND0_BITS);
+}
+
+static INLINE void compute_basic_avg_4x4(uint16x4_t dd0, uint16x4_t dd1,
+ uint16x4_t dd2, uint16x4_t dd3,
+ uint16x4_t d0, uint16x4_t d1,
+ uint16x4_t d2, uint16x4_t d3,
+ const int16x8_t round_offset,
+ uint8x8_t *d01_u8, uint8x8_t *d23_u8) {
+ uint16x4_t avg0 = vhadd_u16(dd0, d0);
+ uint16x4_t avg1 = vhadd_u16(dd1, d1);
+ uint16x4_t avg2 = vhadd_u16(dd2, d2);
+ uint16x4_t avg3 = vhadd_u16(dd3, d3);
+
+ int16x8_t dst_01 = vreinterpretq_s16_u16(vcombine_u16(avg0, avg1));
+ int16x8_t dst_23 = vreinterpretq_s16_u16(vcombine_u16(avg2, avg3));
+
+ dst_01 = vsubq_s16(dst_01, round_offset);
+ dst_23 = vsubq_s16(dst_23, round_offset);
+
+ *d01_u8 = vqrshrun_n_s16(dst_01, FILTER_BITS - ROUND0_BITS);
+ *d23_u8 = vqrshrun_n_s16(dst_23, FILTER_BITS - ROUND0_BITS);
+}
+
+static INLINE void compute_dist_wtd_avg_8x4(
+ uint16x8_t dd0, uint16x8_t dd1, uint16x8_t dd2, uint16x8_t dd3,
+ uint16x8_t d0, uint16x8_t d1, uint16x8_t d2, uint16x8_t d3,
+ const uint16_t fwd_offset, const uint16_t bck_offset,
+ const int16x8_t round_offset, uint8x8_t *d0_u8, uint8x8_t *d1_u8,
+ uint8x8_t *d2_u8, uint8x8_t *d3_u8) {
+ uint32x4_t blend0_lo = vmull_n_u16(vget_low_u16(dd0), fwd_offset);
+ blend0_lo = vmlal_n_u16(blend0_lo, vget_low_u16(d0), bck_offset);
+ uint32x4_t blend0_hi = vmull_n_u16(vget_high_u16(dd0), fwd_offset);
+ blend0_hi = vmlal_n_u16(blend0_hi, vget_high_u16(d0), bck_offset);
+
+ uint32x4_t blend1_lo = vmull_n_u16(vget_low_u16(dd1), fwd_offset);
+ blend1_lo = vmlal_n_u16(blend1_lo, vget_low_u16(d1), bck_offset);
+ uint32x4_t blend1_hi = vmull_n_u16(vget_high_u16(dd1), fwd_offset);
+ blend1_hi = vmlal_n_u16(blend1_hi, vget_high_u16(d1), bck_offset);
+
+ uint32x4_t blend2_lo = vmull_n_u16(vget_low_u16(dd2), fwd_offset);
+ blend2_lo = vmlal_n_u16(blend2_lo, vget_low_u16(d2), bck_offset);
+ uint32x4_t blend2_hi = vmull_n_u16(vget_high_u16(dd2), fwd_offset);
+ blend2_hi = vmlal_n_u16(blend2_hi, vget_high_u16(d2), bck_offset);
+
+ uint32x4_t blend3_lo = vmull_n_u16(vget_low_u16(dd3), fwd_offset);
+ blend3_lo = vmlal_n_u16(blend3_lo, vget_low_u16(d3), bck_offset);
+ uint32x4_t blend3_hi = vmull_n_u16(vget_high_u16(dd3), fwd_offset);
+ blend3_hi = vmlal_n_u16(blend3_hi, vget_high_u16(d3), bck_offset);
+
+ uint16x8_t avg0 = vcombine_u16(vshrn_n_u32(blend0_lo, DIST_PRECISION_BITS),
+ vshrn_n_u32(blend0_hi, DIST_PRECISION_BITS));
+ uint16x8_t avg1 = vcombine_u16(vshrn_n_u32(blend1_lo, DIST_PRECISION_BITS),
+ vshrn_n_u32(blend1_hi, DIST_PRECISION_BITS));
+ uint16x8_t avg2 = vcombine_u16(vshrn_n_u32(blend2_lo, DIST_PRECISION_BITS),
+ vshrn_n_u32(blend2_hi, DIST_PRECISION_BITS));
+ uint16x8_t avg3 = vcombine_u16(vshrn_n_u32(blend3_lo, DIST_PRECISION_BITS),
+ vshrn_n_u32(blend3_hi, DIST_PRECISION_BITS));
+
+ int16x8_t dst0 = vsubq_s16(vreinterpretq_s16_u16(avg0), round_offset);
+ int16x8_t dst1 = vsubq_s16(vreinterpretq_s16_u16(avg1), round_offset);
+ int16x8_t dst2 = vsubq_s16(vreinterpretq_s16_u16(avg2), round_offset);
+ int16x8_t dst3 = vsubq_s16(vreinterpretq_s16_u16(avg3), round_offset);
+
+ *d0_u8 = vqrshrun_n_s16(dst0, FILTER_BITS - ROUND0_BITS);
+ *d1_u8 = vqrshrun_n_s16(dst1, FILTER_BITS - ROUND0_BITS);
+ *d2_u8 = vqrshrun_n_s16(dst2, FILTER_BITS - ROUND0_BITS);
+ *d3_u8 = vqrshrun_n_s16(dst3, FILTER_BITS - ROUND0_BITS);
+}
+
+static INLINE void compute_basic_avg_8x4(uint16x8_t dd0, uint16x8_t dd1,
+ uint16x8_t dd2, uint16x8_t dd3,
+ uint16x8_t d0, uint16x8_t d1,
+ uint16x8_t d2, uint16x8_t d3,
+ const int16x8_t round_offset,
+ uint8x8_t *d0_u8, uint8x8_t *d1_u8,
+ uint8x8_t *d2_u8, uint8x8_t *d3_u8) {
+ uint16x8_t avg0 = vhaddq_u16(dd0, d0);
+ uint16x8_t avg1 = vhaddq_u16(dd1, d1);
+ uint16x8_t avg2 = vhaddq_u16(dd2, d2);
+ uint16x8_t avg3 = vhaddq_u16(dd3, d3);
+
+ int16x8_t dst0 = vsubq_s16(vreinterpretq_s16_u16(avg0), round_offset);
+ int16x8_t dst1 = vsubq_s16(vreinterpretq_s16_u16(avg1), round_offset);
+ int16x8_t dst2 = vsubq_s16(vreinterpretq_s16_u16(avg2), round_offset);
+ int16x8_t dst3 = vsubq_s16(vreinterpretq_s16_u16(avg3), round_offset);
+
+ *d0_u8 = vqrshrun_n_s16(dst0, FILTER_BITS - ROUND0_BITS);
+ *d1_u8 = vqrshrun_n_s16(dst1, FILTER_BITS - ROUND0_BITS);
+ *d2_u8 = vqrshrun_n_s16(dst2, FILTER_BITS - ROUND0_BITS);
+ *d3_u8 = vqrshrun_n_s16(dst3, FILTER_BITS - ROUND0_BITS);
+}
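
The helpers above are straight moves of the previously static
functions into a shared header, so the Neon, DotProd and I8MM
translation units can all use them. As a scalar reference for what
compute_dist_wtd_avg_* computes per pixel, here is a sketch (the
constants match AV1's 8-bit values; the function name is
illustrative):

    /* Scalar equivalent of the vector dist-weighted average helpers.
     * dd and d are the two intermediate CONV_BUF_TYPE predictions. */
    #include <stdint.h>

    #define DIST_PRECISION_BITS 4
    #define FILTER_BITS 7
    #define ROUND0_BITS 3

    static uint8_t dist_wtd_avg_scalar(uint16_t dd, uint16_t d,
                                       uint16_t fwd_offset,
                                       uint16_t bck_offset,
                                       int16_t round_offset) {
      /* Weighted blend of the two compound predictions. */
      uint32_t blend = (uint32_t)dd * fwd_offset + (uint32_t)d * bck_offset;
      int32_t avg = (int32_t)(blend >> DIST_PRECISION_BITS) - round_offset;
      /* Rounding shift back to pixel precision, saturated to [0, 255],
       * matching vqrshrun_n_s16(., FILTER_BITS - ROUND0_BITS). */
      const int shift = FILTER_BITS - ROUND0_BITS;
      int32_t res = (avg + (1 << (shift - 1))) >> shift;
      return (uint8_t)(res < 0 ? 0 : (res > 255 ? 255 : res));
    }
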
diff --git a/av1/common/arm/compound_convolve_neon_dotprod.c b/av1/common/arm/compound_convolve_neon_dotprod.c
new file mode 100644
index 0000000..f9e10a0
--- /dev/null
+++ b/av1/common/arm/compound_convolve_neon_dotprod.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom_dsp/arm/mem_neon.h"
+#include "av1/common/arm/compound_convolve_neon.h"
+#include "av1/common/arm/convolve_neon.h"
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+static INLINE uint16x4_t convolve4_4_x(uint8x16_t samples,
+ const int8x8_t x_filter,
+ const int32x4_t correction,
+ const uint8x16_t range_limit,
+ const uint8x16_t permute_tbl) {
+ // Clamp sample range to [-128, 127] for 8-bit signed dot product.
+ int8x16_t clamped_samples =
+ vreinterpretq_s8_u8(vsubq_u8(samples, range_limit));
+
+ // Permute samples ready for dot product.
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
+ int8x16_t permuted_samples = vqtbl1q_s8(clamped_samples, permute_tbl);
+
+ // Accumulate dot product into 'correction' to account for range clamp.
+ int32x4_t sum = vdotq_lane_s32(correction, permuted_samples, x_filter, 0);
+
+ // We halved the convolution filter values so -1 from the right shift.
+ return vreinterpret_u16_s16(vshrn_n_s32(sum, ROUND0_BITS - 1));
+}
+
+static INLINE uint16x8_t convolve8_8_x(uint8x16_t samples,
+ const int8x8_t x_filter,
+ const int32x4_t correction,
+ const uint8x16_t range_limit,
+ const uint8x16x3_t permute_tbl) {
+ int8x16_t clamped_samples, permuted_samples[3];
+ int32x4_t sum[2];
+
+ // Clamp sample range to [-128, 127] for 8-bit signed dot product.
+ clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit));
+
+  // Permute samples ready for dot product.

+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
+ permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]);
+ // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
+ permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]);
+ // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
+ permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]);
+
+ // Accumulate dot product into 'correction' to account for range clamp.
+ // First 4 output values.
+ sum[0] = vdotq_lane_s32(correction, permuted_samples[0], x_filter, 0);
+ sum[0] = vdotq_lane_s32(sum[0], permuted_samples[1], x_filter, 1);
+ // Second 4 output values.
+ sum[1] = vdotq_lane_s32(correction, permuted_samples[1], x_filter, 0);
+ sum[1] = vdotq_lane_s32(sum[1], permuted_samples[2], x_filter, 1);
+
+ // Narrow and re-pack.
+ // We halved the convolution filter values so -1 from the right shift.
+ int16x8_t res = vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1),
+ vshrn_n_s32(sum[1], ROUND0_BITS - 1));
+ return vreinterpretq_u16_s16(res);
+}
+
+static INLINE void dist_wtd_convolve_x_dist_wtd_avg_neon_dotprod(
+ const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
+ assert(w % 4 == 0);
+ assert(h % 4 == 0);
+
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+ const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
+
+ const uint16_t fwd_offset = conv_params->fwd_offset;
+ const uint16_t bck_offset = conv_params->bck_offset;
+
+ // Horizontal filter.
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr);
+
+ // Dot-product constants and other shims.
+ const uint8x16_t range_limit = vdupq_n_u8(128);
+ const int32_t correction_s32 =
+ vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1));
+ // Fold round_offset into the dot-product filter correction constant. The
+ // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-
+ // rounding shifts - which are generally faster than rounding shifts on
+ // modern CPUs. (The extra -1 is needed because we halved the filter values.)
+ int32x4_t correction =
+ vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) +
+ (1 << ((ROUND0_BITS - 1) - 1)));
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const uint8_t *src_ptr = src - horiz_offset;
+ CONV_BUF_TYPE *dst_ptr = conv_params->dst;
+ uint8_t *dst8_ptr = dst8;
+ int dst_stride = conv_params->dst_stride;
+ int height = h;
+
+ if (w == 4) {
+ const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
+ // 4-tap filters are used for blocks having width <= 4.
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter =
+ vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
+
+ src_ptr += 2;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 =
+ convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d1 =
+ convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d2 =
+ convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d3 =
+ convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl);
+
+ uint16x4_t dd0, dd1, dd2, dd3;
+ load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ uint8x8_t d01_u8, d23_u8;
+ compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
+ bck_offset, round_offset_vec, &d01_u8, &d23_u8);
+
+ store_u8_4x1(dst8_ptr + 0 * dst8_stride, d01_u8, 0);
+ store_u8_4x1(dst8_ptr + 1 * dst8_stride, d01_u8, 1);
+ store_u8_4x1(dst8_ptr + 2 * dst8_stride, d23_u8, 0);
+ store_u8_4x1(dst8_ptr + 3 * dst8_stride, d23_u8, 1);
+
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ dst8_ptr += 4 * dst8_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1);
+
+ do {
+ const uint8_t *s = src_ptr;
+ CONV_BUF_TYPE *d = dst_ptr;
+ uint8_t *d_u8 = dst8_ptr;
+ int width = w;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x8_t d0 =
+ convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d1 =
+ convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d2 =
+ convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d3 =
+ convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl);
+
+ uint16x8_t dd0, dd1, dd2, dd3;
+ load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8;
+ compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
+ bck_offset, round_offset_vec, &d0_u8, &d1_u8,
+ &d2_u8, &d3_u8);
+
+ store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
+
+ s += 8;
+ d += 8;
+ d_u8 += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ dst8_ptr += 4 * dst8_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE void dist_wtd_convolve_x_avg_neon_dotprod(
+ const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
+ assert(w % 4 == 0);
+ assert(h % 4 == 0);
+
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+ const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
+
+ // Horizontal filter.
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr);
+
+ // Dot-product constants and other shims.
+ const uint8x16_t range_limit = vdupq_n_u8(128);
+ const int32_t correction_s32 =
+ vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1));
+ // Fold round_offset into the dot-product filter correction constant. The
+ // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-
+ // rounding shifts - which are generally faster than rounding shifts on
+ // modern CPUs. (The extra -1 is needed because we halved the filter values.)
+ int32x4_t correction =
+ vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) +
+ (1 << ((ROUND0_BITS - 1) - 1)));
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const uint8_t *src_ptr = src - horiz_offset;
+ CONV_BUF_TYPE *dst_ptr = conv_params->dst;
+ uint8_t *dst8_ptr = dst8;
+ int dst_stride = conv_params->dst_stride;
+ int height = h;
+
+ if (w == 4) {
+ const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
+ // 4-tap filters are used for blocks having width <= 4.
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter =
+ vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
+
+ src_ptr += 2;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 =
+ convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d1 =
+ convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d2 =
+ convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d3 =
+ convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl);
+
+ uint16x4_t dd0, dd1, dd2, dd3;
+ load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ uint8x8_t d01_u8, d23_u8;
+ compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3,
+ round_offset_vec, &d01_u8, &d23_u8);
+
+ store_u8_4x1(dst8_ptr + 0 * dst8_stride, d01_u8, 0);
+ store_u8_4x1(dst8_ptr + 1 * dst8_stride, d01_u8, 1);
+ store_u8_4x1(dst8_ptr + 2 * dst8_stride, d23_u8, 0);
+ store_u8_4x1(dst8_ptr + 3 * dst8_stride, d23_u8, 1);
+
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ dst8_ptr += 4 * dst8_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1);
+
+ do {
+ const uint8_t *s = src_ptr;
+ CONV_BUF_TYPE *d = dst_ptr;
+ uint8_t *d_u8 = dst8_ptr;
+ int width = w;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x8_t d0 =
+ convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d1 =
+ convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d2 =
+ convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d3 =
+ convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl);
+
+ uint16x8_t dd0, dd1, dd2, dd3;
+ load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8;
+ compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3,
+ round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8);
+
+ store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
+
+ s += 8;
+ d += 8;
+ d_u8 += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ dst8_ptr += 4 * dst8_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE void dist_wtd_convolve_x_neon_dotprod(
+ const uint8_t *src, int src_stride, int w, int h,
+ const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
+ assert(w % 4 == 0);
+ assert(h % 4 == 0);
+
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+
+ // Horizontal filter.
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+ const int16x8_t x_filter_s16 = vld1q_s16(x_filter_ptr);
+
+ // Dot-product constants and other shims.
+ const uint8x16_t range_limit = vdupq_n_u8(128);
+ const int32_t correction_s32 =
+ vaddlvq_s16(vshlq_n_s16(x_filter_s16, FILTER_BITS - 1));
+ // Fold round_offset into the dot-product filter correction constant. The
+ // additional shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-
+ // rounding shifts - which are generally faster than rounding shifts on
+ // modern CPUs. (The extra -1 is needed because we halved the filter values.)
+ int32x4_t correction =
+ vdupq_n_s32(correction_s32 + (round_offset << (ROUND0_BITS - 1)) +
+ (1 << ((ROUND0_BITS - 1) - 1)));
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const uint8_t *src_ptr = src - horiz_offset;
+ CONV_BUF_TYPE *dst_ptr = conv_params->dst;
+ int dst_stride = conv_params->dst_stride;
+ int height = h;
+
+ if (w == 4) {
+ const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
+ // 4-tap filters are used for blocks having width <= 4.
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter =
+ vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
+
+ src_ptr += 2;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 =
+ convolve4_4_x(s0, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d1 =
+ convolve4_4_x(s1, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d2 =
+ convolve4_4_x(s2, x_filter, correction, range_limit, permute_tbl);
+ uint16x4_t d3 =
+ convolve4_4_x(s3, x_filter, correction, range_limit, permute_tbl);
+
+ store_u16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3);
+
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter = vshrn_n_s16(x_filter_s16, 1);
+
+ do {
+ const uint8_t *s = src_ptr;
+ CONV_BUF_TYPE *d = dst_ptr;
+ int width = w;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x8_t d0 =
+ convolve8_8_x(s0, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d1 =
+ convolve8_8_x(s1, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d2 =
+ convolve8_8_x(s2, x_filter, correction, range_limit, permute_tbl);
+ uint16x8_t d3 =
+ convolve8_8_x(s3, x_filter, correction, range_limit, permute_tbl);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+void av1_dist_wtd_convolve_x_neon_dotprod(
+ const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
+ if (conv_params->do_average) {
+ if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) {
+ dist_wtd_convolve_x_dist_wtd_avg_neon_dotprod(
+ src, src_stride, dst8, dst8_stride, w, h, filter_params_x,
+ subpel_x_qn, conv_params);
+ } else {
+ dist_wtd_convolve_x_avg_neon_dotprod(src, src_stride, dst8, dst8_stride,
+ w, h, filter_params_x, subpel_x_qn,
+ conv_params);
+ }
+ } else {
+ dist_wtd_convolve_x_neon_dotprod(src, src_stride, w, h, filter_params_x,
+ subpel_x_qn, conv_params);
+ }
+}
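
The shim arithmetic used throughout these kernels can be checked with a
scalar model. A minimal sketch (illustrative only, assuming ROUND0_BITS == 3
so the post-halving shift is ROUND0_BITS - 1 == 2, and the bd == 8 compound
round_offset of 6144 computed above):

    #include <assert.h>
    #include <stdint.h>

    /* Reference: rounding right shift, then add the compound offset. */
    static int32_t with_rounding_shift(int32_t acc, int32_t round_offset,
                                       int round0) {
      return ((acc + (1 << (round0 - 1))) >> round0) + round_offset;
    }

    /* Shimmed: fold the offset and the rounding half-bias into the
     * dot-product accumulator, then use a plain (non-rounding) shift,
     * as the Neon kernels do with vshrn_n_s32 instead of vrshrn_n_s32. */
    static int32_t with_folded_shim(int32_t acc, int32_t round_offset,
                                    int round0) {
      const int32_t shim = (round_offset << round0) + (1 << (round0 - 1));
      return (acc + shim) >> round0;
    }

    int main(void) {
      for (int32_t acc = 0; acc <= (1 << 16); ++acc) {
        assert(with_rounding_shift(acc, 6144, 2) ==
               with_folded_shim(acc, 6144, 2));
      }
      return 0;
    }

Since the low round0 bits of round_offset << round0 are zero, the offset
passes through the shift unchanged, so the two forms agree exactly for the
non-negative accumulator values these kernels produce.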
diff --git a/av1/common/arm/compound_convolve_neon_i8mm.c b/av1/common/arm/compound_convolve_neon_i8mm.c
new file mode 100644
index 0000000..fad5094
--- /dev/null
+++ b/av1/common/arm/compound_convolve_neon_i8mm.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom_dsp/arm/mem_neon.h"
+#include "av1/common/arm/compound_convolve_neon.h"
+#include "av1/common/arm/convolve_neon.h"
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+static INLINE uint16x4_t convolve4_4_x(uint8x16_t samples,
+ const int8x8_t x_filter,
+ const uint8x16_t permute_tbl,
+ const int32x4_t round_offset) {
+ // Permute samples ready for dot product.
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
+ uint8x16_t permuted_samples = vqtbl1q_u8(samples, permute_tbl);
+
+ // First 4 output values.
+ int32x4_t sum = vusdotq_lane_s32(round_offset, permuted_samples, x_filter, 0);
+
+  // We halved the convolution filter values, so subtract 1 from the right shift.
+ return vreinterpret_u16_s16(vshrn_n_s32(sum, ROUND0_BITS - 1));
+}
+
+static INLINE uint16x8_t convolve8_8_x(uint8x16_t samples,
+ const int8x8_t x_filter,
+ const uint8x16x3_t permute_tbl,
+ const int32x4_t round_offset) {
+ uint8x16_t permuted_samples[3];
+ int32x4_t sum[2];
+
+ // Permute samples ready for dot product.
+ // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
+ permuted_samples[0] = vqtbl1q_u8(samples, permute_tbl.val[0]);
+ // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
+ permuted_samples[1] = vqtbl1q_u8(samples, permute_tbl.val[1]);
+ // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
+ permuted_samples[2] = vqtbl1q_u8(samples, permute_tbl.val[2]);
+
+ // First 4 output values.
+ sum[0] = vusdotq_lane_s32(round_offset, permuted_samples[0], x_filter, 0);
+ sum[0] = vusdotq_lane_s32(sum[0], permuted_samples[1], x_filter, 1);
+ // Second 4 output values.
+ sum[1] = vusdotq_lane_s32(round_offset, permuted_samples[1], x_filter, 0);
+ sum[1] = vusdotq_lane_s32(sum[1], permuted_samples[2], x_filter, 1);
+
+ // Narrow and re-pack.
+  // We halved the convolution filter values, so subtract 1 from the right shift.
+ int16x8_t res = vcombine_s16(vshrn_n_s32(sum[0], ROUND0_BITS - 1),
+ vshrn_n_s32(sum[1], ROUND0_BITS - 1));
+ return vreinterpretq_u16_s16(res);
+}
+
+static INLINE void dist_wtd_convolve_x_dist_wtd_avg_neon_i8mm(
+ const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
+ assert(w % 4 == 0);
+ assert(h % 4 == 0);
+
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+ const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
+ // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding
+  // shifts, which are generally faster than rounding shifts on modern CPUs.
+ // (The extra -1 is needed because we halved the filter values.)
+ const int32x4_t round_offset_shim = vdupq_n_s32(
+ (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1)));
+
+ const uint16_t fwd_offset = conv_params->fwd_offset;
+ const uint16_t bck_offset = conv_params->bck_offset;
+
+ // Horizontal filter.
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const uint8_t *src_ptr = src - horiz_offset;
+ CONV_BUF_TYPE *dst_ptr = conv_params->dst;
+ uint8_t *dst8_ptr = dst8;
+ int dst_stride = conv_params->dst_stride;
+ int height = h;
+
+ if (w == 4) {
+ const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
+ // 4-tap filters are used for blocks having width <= 4.
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter =
+ vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
+
+ src_ptr += 2;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 =
+ convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d1 =
+ convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d2 =
+ convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d3 =
+ convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim);
+
+ uint16x4_t dd0, dd1, dd2, dd3;
+ load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ uint8x8_t d01_u8, d23_u8;
+ compute_dist_wtd_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
+ bck_offset, round_offset_vec, &d01_u8, &d23_u8);
+
+ store_u8_4x1(dst8_ptr + 0 * dst8_stride, d01_u8, 0);
+ store_u8_4x1(dst8_ptr + 1 * dst8_stride, d01_u8, 1);
+ store_u8_4x1(dst8_ptr + 2 * dst8_stride, d23_u8, 0);
+ store_u8_4x1(dst8_ptr + 3 * dst8_stride, d23_u8, 1);
+
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ dst8_ptr += 4 * dst8_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1);
+
+ do {
+ const uint8_t *s = src_ptr;
+ CONV_BUF_TYPE *d = dst_ptr;
+ uint8_t *d_u8 = dst8_ptr;
+ int width = w;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x8_t d0 =
+ convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d1 =
+ convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d2 =
+ convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d3 =
+ convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim);
+
+ uint16x8_t dd0, dd1, dd2, dd3;
+ load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8;
+ compute_dist_wtd_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
+ bck_offset, round_offset_vec, &d0_u8, &d1_u8,
+ &d2_u8, &d3_u8);
+
+ store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
+
+ s += 8;
+ d += 8;
+ d_u8 += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ dst8_ptr += 4 * dst8_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE void dist_wtd_convolve_x_avg_neon_i8mm(
+ const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
+ assert(w % 4 == 0);
+ assert(h % 4 == 0);
+
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+ const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
+ // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding
+  // shifts, which are generally faster than rounding shifts on modern CPUs.
+ // (The extra -1 is needed because we halved the filter values.)
+ const int32x4_t round_offset_shim = vdupq_n_s32(
+ (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1)));
+
+ // Horizontal filter.
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const uint8_t *src_ptr = src - horiz_offset;
+ CONV_BUF_TYPE *dst_ptr = conv_params->dst;
+ uint8_t *dst8_ptr = dst8;
+ int dst_stride = conv_params->dst_stride;
+ int height = h;
+
+ if (w == 4) {
+ const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
+ // 4-tap filters are used for blocks having width <= 4.
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter =
+ vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
+
+ src_ptr += 2;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 =
+ convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d1 =
+ convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d2 =
+ convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d3 =
+ convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim);
+
+ uint16x4_t dd0, dd1, dd2, dd3;
+ load_u16_4x4(dst_ptr, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ uint8x8_t d01_u8, d23_u8;
+ compute_basic_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3,
+ round_offset_vec, &d01_u8, &d23_u8);
+
+ store_u8_4x1(dst8_ptr + 0 * dst8_stride, d01_u8, 0);
+ store_u8_4x1(dst8_ptr + 1 * dst8_stride, d01_u8, 1);
+ store_u8_4x1(dst8_ptr + 2 * dst8_stride, d23_u8, 0);
+ store_u8_4x1(dst8_ptr + 3 * dst8_stride, d23_u8, 1);
+
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ dst8_ptr += 4 * dst8_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1);
+
+ do {
+ const uint8_t *s = src_ptr;
+ CONV_BUF_TYPE *d = dst_ptr;
+ uint8_t *d_u8 = dst8_ptr;
+ int width = w;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x8_t d0 =
+ convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d1 =
+ convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d2 =
+ convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d3 =
+ convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim);
+
+ uint16x8_t dd0, dd1, dd2, dd3;
+ load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ uint8x8_t d0_u8, d1_u8, d2_u8, d3_u8;
+ compute_basic_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3,
+ round_offset_vec, &d0_u8, &d1_u8, &d2_u8, &d3_u8);
+
+ store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
+
+ s += 8;
+ d += 8;
+ d_u8 += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ dst8_ptr += 4 * dst8_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+static INLINE void dist_wtd_convolve_x_neon_i8mm(
+ const uint8_t *src, int src_stride, int w, int h,
+ const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
+ assert(w % 4 == 0);
+ assert(h % 4 == 0);
+
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
+ // A shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding
+  // shifts, which are generally faster than rounding shifts on modern CPUs.
+ // (The extra -1 is needed because we halved the filter values.)
+ const int32x4_t round_offset_shim = vdupq_n_s32(
+ (round_offset << (ROUND0_BITS - 1)) + (1 << ((ROUND0_BITS - 1) - 1)));
+
+ // Horizontal filter.
+ const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+ filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+ const int horiz_offset = filter_params_x->taps / 2 - 1;
+ const uint8_t *src_ptr = src - horiz_offset;
+ CONV_BUF_TYPE *dst_ptr = conv_params->dst;
+ int dst_stride = conv_params->dst_stride;
+ int height = h;
+
+ if (w == 4) {
+ const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);
+ // 4-tap filters are used for blocks having width <= 4.
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter =
+ vshrn_n_s16(vcombine_s16(vld1_s16(x_filter_ptr + 2), vdup_n_s16(0)), 1);
+
+ src_ptr += 2;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(src_ptr, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x4_t d0 =
+ convolve4_4_x(s0, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d1 =
+ convolve4_4_x(s1, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d2 =
+ convolve4_4_x(s2, x_filter, permute_tbl, round_offset_shim);
+ uint16x4_t d3 =
+ convolve4_4_x(s3, x_filter, permute_tbl, round_offset_shim);
+
+ store_u16_4x4(dst_ptr, dst_stride, d0, d1, d2, d3);
+
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ } else {
+ const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
+ // Filter values are even, so halve to reduce intermediate precision reqs.
+ const int8x8_t x_filter = vshrn_n_s16(vld1q_s16(x_filter_ptr), 1);
+
+ do {
+ const uint8_t *s = src_ptr;
+ CONV_BUF_TYPE *d = dst_ptr;
+ int width = w;
+
+ do {
+ uint8x16_t s0, s1, s2, s3;
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ uint16x8_t d0 =
+ convolve8_8_x(s0, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d1 =
+ convolve8_8_x(s1, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d2 =
+ convolve8_8_x(s2, x_filter, permute_tbl, round_offset_shim);
+ uint16x8_t d3 =
+ convolve8_8_x(s3, x_filter, permute_tbl, round_offset_shim);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width != 0);
+ src_ptr += 4 * src_stride;
+ dst_ptr += 4 * dst_stride;
+ height -= 4;
+ } while (height != 0);
+ }
+}
+
+void av1_dist_wtd_convolve_x_neon_i8mm(
+ const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
+ int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
+ ConvolveParams *conv_params) {
+ if (conv_params->do_average) {
+ if (UNLIKELY(conv_params->use_dist_wtd_comp_avg)) {
+ dist_wtd_convolve_x_dist_wtd_avg_neon_i8mm(
+ src, src_stride, dst8, dst8_stride, w, h, filter_params_x,
+ subpel_x_qn, conv_params);
+ } else {
+ dist_wtd_convolve_x_avg_neon_i8mm(src, src_stride, dst8, dst8_stride, w,
+ h, filter_params_x, subpel_x_qn,
+ conv_params);
+ }
+ } else {
+ dist_wtd_convolve_x_neon_i8mm(src, src_stride, w, h, filter_params_x,
+ subpel_x_qn, conv_params);
+ }
+}
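
Unlike the DotProd file above, these I8MM kernels pass the unsigned source
pixels straight to vusdotq_lane_s32, so no range_limit recentring and no
signed-correction constant are required. A scalar model of the USDOT lane
semantics relied on here (for exposition, not part of the patch):

    #include <stdint.h>

    /* Each 32-bit accumulator lane gains the dot product of four unsigned
     * sample bytes with the four signed filter-tap bytes selected by `lane`,
     * mirroring vusdotq_lane_s32(acc, samples, taps, lane). */
    static void usdot_lane_model(int32_t acc[4], const uint8_t samples[16],
                                 const int8_t taps[8], int lane) {
      const int8_t *t = taps + 4 * lane;
      for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 4; ++j) {
          acc[i] += (int32_t)samples[4 * i + j] * t[j];
        }
      }
    }

The SDOT-based DotProd path must instead subtract 128 from every sample
(via range_limit) to fit them in signed bytes and then compensate in its
correction constant; USDOT removes both steps.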
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index eff70f9..9507cca 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -593,7 +593,7 @@
specialize qw/av1_convolve_2d_scale sse4_1/;
specialize qw/av1_dist_wtd_convolve_2d sse2 ssse3 avx2 neon/;
specialize qw/av1_dist_wtd_convolve_2d_copy sse2 avx2 neon/;
- specialize qw/av1_dist_wtd_convolve_x sse2 avx2 neon/;
+ specialize qw/av1_dist_wtd_convolve_x sse2 avx2 neon neon_dotprod neon_i8mm/;
specialize qw/av1_dist_wtd_convolve_y sse2 avx2 neon/;
if(aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
specialize qw/av1_highbd_dist_wtd_convolve_2d sse4_1 avx2 neon/;
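
For reference, the specialize line drives rtcd.pl's generated dispatch. A
rough sketch of the shape it takes (illustrative only; the exact generated
code in av1_rtcd.h differs, and the HAS_NEON_DOTPROD/HAS_NEON_I8MM flag
names are assumptions based on aom_ports' Arm CPU detection):

    #include "aom_ports/arm.h"    /* aom_arm_cpu_caps() */
    #include "config/av1_rtcd.h"  /* prototypes for the *_c/*_neon* variants */

    /* Function pointer behind the av1_dist_wtd_convolve_x symbol. */
    void (*av1_dist_wtd_convolve_x_ptr)(
        const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride,
        int w, int h, const InterpFilterParams *filter_params_x,
        const int subpel_x_qn, ConvolveParams *conv_params);

    static void setup_rtcd_internal(void) {
      const int flags = aom_arm_cpu_caps();
      av1_dist_wtd_convolve_x_ptr = av1_dist_wtd_convolve_x_c;
      if (flags & HAS_NEON)
        av1_dist_wtd_convolve_x_ptr = av1_dist_wtd_convolve_x_neon;
      if (flags & HAS_NEON_DOTPROD)
        av1_dist_wtd_convolve_x_ptr = av1_dist_wtd_convolve_x_neon_dotprod;
      if (flags & HAS_NEON_I8MM)
        av1_dist_wtd_convolve_x_ptr = av1_dist_wtd_convolve_x_neon_i8mm;
    }

Later entries win, so on a CPU reporting both DotProd and I8MM the I8MM
kernel is selected.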
diff --git a/test/av1_convolve_test.cc b/test/av1_convolve_test.cc
index df7dced..3894284 100644
--- a/test/av1_convolve_test.cc
+++ b/test/av1_convolve_test.cc
@@ -1309,6 +1309,18 @@
BuildLowbdLumaParams(av1_dist_wtd_convolve_x_neon));
#endif
+#if HAVE_NEON_DOTPROD
+INSTANTIATE_TEST_SUITE_P(
+ NEON_DOTPROD, AV1ConvolveXCompoundTest,
+ BuildLowbdLumaParams(av1_dist_wtd_convolve_x_neon_dotprod));
+#endif
+
+#if HAVE_NEON_I8MM
+INSTANTIATE_TEST_SUITE_P(
+ NEON_I8MM, AV1ConvolveXCompoundTest,
+ BuildLowbdLumaParams(av1_dist_wtd_convolve_x_neon_i8mm));
+#endif
+
#if CONFIG_AV1_HIGHBITDEPTH
/////////////////////////////////////////////////
// Compound convolve-x functions (high bit-depth)