// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert ROW_TILE >= 2
$assert ROW_SUBTILE >= 2
$assert ROW_SUBTILE <= ROW_TILE
$assert ACCUMULATORS >= 1
$assert ROW_TILE >= ACCUMULATORS * 2
$assert ROW_SUBTILE >= ACCUMULATORS * 2
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>

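// Multipass QS8 global average pooling (with output min/max clamping) microkernel template for NEON:
// the first pass accumulates ${ROW_TILE} input rows into an int32 scratch buffer, each intermediate
// pass adds ${ROW_SUBTILE} more rows, and the final pass adds the remaining rows, requantizes the
// totals, and stores int8 outputs, ${CHANNEL_TILE} channels at a time.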
void xnn_qs8_gavgpool_minmax_ukernel_${ROW_TILE}p${ROW_SUBTILE}x__neon_c${CHANNEL_TILE}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
assert(rows > ${ROW_TILE});
assert(channels != 0);
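// Set up ${ROW_TILE} input row pointers and the increment used to advance them between passes.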
const int8_t* i0 = input;
$for M in range(1, ROW_TILE):
const int8_t* i${M} = (const int8_t*) ((uintptr_t) i${M-1} + input_stride);
$if CHANNEL_TILE <= 16:
const size_t input_increment = ${ROW_TILE} * input_stride - round_up_po2(channels, ${CHANNEL_TILE});
$else:
const size_t input_increment = ${ROW_TILE} * input_stride - round_up_po2(channels, 8);
const int32x4_t vbias = vld1q_dup_s32(&params->neon.bias);
int32_t* b = buffer;
size_t c = channels;
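// First pass: seed each 32-bit accumulator with the bias plus the sum of the first ${ROW_TILE} rows, then store it to the scratch buffer.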
for (; ${"c >= %d" % CHANNEL_TILE if CHANNEL_TILE > 16 else "c != 0"}; ${("c -= %d" if CHANNEL_TILE > 16 else "c = doz(c, %d)") % CHANNEL_TILE}) {
$for M in range(ROW_TILE):
$for C in range(0, CHANNEL_TILE, 8):
const int8x8_t vi${M}x${ABC[C:C+8]} = vld1_s8(i${M}); i${M} += 8;
$for A in range(ACCUMULATORS):
$for C in range(0, CHANNEL_TILE, 8):
int16x8_t vacc${A}x${ABC[C:C+8]} = vaddl_s8(vi${A*2}x${ABC[C:C+8]}, vi${A*2+1}x${ABC[C:C+8]});
$for M in range(ACCUMULATORS * 2, ROW_TILE):
$for C in range(0, CHANNEL_TILE, 8):
vacc${M % ACCUMULATORS}x${ABC[C:C+8]} = vaddw_s8(vacc${M % ACCUMULATORS}x${ABC[C:C+8]}, vi${M}x${ABC[C:C+8]});
$if ACCUMULATORS > 1:
// Add up all accumulators to vacc0x${ABC[0:CHANNEL_TILE]}
$ACC_SLICE = 1
$while ACC_SLICE < ACCUMULATORS:
$for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
$if A + ACC_SLICE < ACCUMULATORS:
$for C in range(0, CHANNEL_TILE, 8):
vacc${A}x${ABC[C:C+8]} = vaddq_s16(vacc${A}x${ABC[C:C+8]}, vacc${A + ACC_SLICE}x${ABC[C:C+8]});
$ACC_SLICE *= 2
$for C in range(0, CHANNEL_TILE, 8):
const int32x4_t vacc${ABC[C:C+4]} = vaddw_s16(vbias, vget_low_s16(vacc0x${ABC[C:C+8]}));
const int32x4_t vacc${ABC[C+4:C+8]} = vaddw_s16(vbias, vget_high_s16(vacc0x${ABC[C:C+8]}));
$for C in range(0, CHANNEL_TILE, 4):
vst1q_s32(b, vacc${ABC[C:C+4]}); b += 4;
}
$if CHANNEL_TILE > 16:
if XNN_UNLIKELY(c != 0) {
do {
$for M in range(ROW_TILE):
const int8x8_t vi${M}x${ABC[0:8]} = vld1_s8(i${M}); i${M} += 8;
$for A in range(ACCUMULATORS):
int16x8_t vacc${A}x${ABC[0:8]} = vaddl_s8(vi${A*2}x${ABC[0:8]}, vi${A*2+1}x${ABC[0:8]});
$for M in range(ACCUMULATORS * 2, ROW_TILE):
vacc${M % ACCUMULATORS}x${ABC[0:8]} = vaddw_s8(vacc${M % ACCUMULATORS}x${ABC[0:8]}, vi${M}x${ABC[0:8]});
$if ACCUMULATORS > 1:
// Add up all accumulators to vacc0x${ABC[0:8]}
$ACC_SLICE = 1
$while ACC_SLICE < ACCUMULATORS:
$for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
$if A + ACC_SLICE < ACCUMULATORS:
vacc${A}x${ABC[0:8]} = vaddq_s16(vacc${A}x${ABC[0:8]}, vacc${A + ACC_SLICE}x${ABC[0:8]});
$ACC_SLICE *= 2
const int32x4_t vacc${ABC[0:4]} = vaddw_s16(vbias, vget_low_s16(vacc0x${ABC[0:8]}));
const int32x4_t vacc${ABC[4:8]} = vaddw_s16(vbias, vget_high_s16(vacc0x${ABC[0:8]}));
vst1q_s32(b, vacc${ABC[0:4]}); b += 4;
vst1q_s32(b, vacc${ABC[4:8]}); b += 4;
c = doz(c, 8);
} while (c != 0);
}
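// Intermediate passes: add ${ROW_SUBTILE} more rows of inputs into the scratch buffer on each pass.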
for (rows -= ${ROW_TILE}; rows > ${ROW_SUBTILE}; rows -= ${ROW_SUBTILE}) {
$for M in range(ROW_SUBTILE):
i${M} = (const int8_t*) ((uintptr_t) i${M + ROW_TILE - ROW_SUBTILE} + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; ${"c >= %d" % CHANNEL_TILE if CHANNEL_TILE > 16 else "c != 0"}; ${("c -= %d" if CHANNEL_TILE > 16 else "c = doz(c, %d)") % CHANNEL_TILE}) {
$for M in range(ROW_SUBTILE):
$for C in range(0, CHANNEL_TILE, 8):
const int8x8_t vi${M}x${ABC[C:C+8]} = vld1_s8(i${M}); i${M} += 8;
$for A in range(ACCUMULATORS):
$for C in range(0, CHANNEL_TILE, 8):
int16x8_t vacc${A}x${ABC[C:C+8]} = vaddl_s8(vi${A*2}x${ABC[C:C+8]}, vi${A*2+1}x${ABC[C:C+8]});
$for M in range(ACCUMULATORS * 2, ROW_SUBTILE):
$for C in range(0, CHANNEL_TILE, 8):
vacc${M % ACCUMULATORS}x${ABC[C:C+8]} = vaddw_s8(vacc${M % ACCUMULATORS}x${ABC[C:C+8]}, vi${M}x${ABC[C:C+8]});
$if ACCUMULATORS > 1:
// Add up all accumulators to vacc0x${ABC[0:CHANNEL_TILE]}
$ACC_SLICE = 1
$while ACC_SLICE < ACCUMULATORS:
$for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
$if A + ACC_SLICE < ACCUMULATORS:
$for C in range(0, CHANNEL_TILE, 8):
vacc${A}x${ABC[C:C+8]} = vaddq_s16(vacc${A}x${ABC[C:C+8]}, vacc${A + ACC_SLICE}x${ABC[C:C+8]});
$ACC_SLICE *= 2
int32x4_t vacc${ABC[0:4]} = vld1q_s32(b);
$for C in range(4, CHANNEL_TILE, 4):
int32x4_t vacc${ABC[C:C+4]} = vld1q_s32(b + ${C});
$for C in range(0, CHANNEL_TILE, 8):
vacc${ABC[C:C+4]} = vaddw_s16(vacc${ABC[C:C+4]}, vget_low_s16(vacc0x${ABC[C:C+8]}));
vacc${ABC[C+4:C+8]} = vaddw_s16(vacc${ABC[C+4:C+8]}, vget_high_s16(vacc0x${ABC[C:C+8]}));
$for C in range(0, CHANNEL_TILE, 4):
vst1q_s32(b, vacc${ABC[C:C+4]}); b += 4;
}
$if CHANNEL_TILE > 16:
if XNN_UNLIKELY(c != 0) {
do {
$for M in range(ROW_SUBTILE):
const int8x8_t vi${M}x${ABC[0:8]} = vld1_s8(i${M}); i${M} += 8;
$for A in range(ACCUMULATORS):
int16x8_t vacc${A}x${ABC[0:8]} = vaddl_s8(vi${A*2}x${ABC[0:8]}, vi${A*2+1}x${ABC[0:8]});
$for M in range(ACCUMULATORS * 2, ROW_SUBTILE):
vacc${M % ACCUMULATORS}x${ABC[0:8]} = vaddw_s8(vacc${M % ACCUMULATORS}x${ABC[0:8]}, vi${M}x${ABC[0:8]});
$if ACCUMULATORS > 1:
// Add up all accumulators to vacc0x${ABC[0:8]}
$ACC_SLICE = 1
$while ACC_SLICE < ACCUMULATORS:
$for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
$if A + ACC_SLICE < ACCUMULATORS:
vacc${A}x${ABC[0:8]} = vaddq_s16(vacc${A}x${ABC[0:8]}, vacc${A + ACC_SLICE}x${ABC[0:8]});
$ACC_SLICE *= 2
int32x4_t vacc${ABC[0:4]} = vld1q_s32(b);
int32x4_t vacc${ABC[4:8]} = vld1q_s32(b + 4);
vacc${ABC[0:4]} = vaddw_s16(vacc${ABC[0:4]}, vget_low_s16(vacc0x${ABC[0:8]}));
vacc${ABC[4:8]} = vaddw_s16(vacc${ABC[4:8]}, vget_high_s16(vacc0x${ABC[0:8]}));
vst1q_s32(b, vacc${ABC[0:4]}); b += 4;
vst1q_s32(b, vacc${ABC[4:8]}); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
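// Final pass: reset the input pointers for the last up-to-${ROW_SUBTILE} rows, substituting the zero buffer for rows past the end.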
i0 = (const int8_t*) ((uintptr_t) i${ROW_TILE - ROW_SUBTILE} + input_increment);
$for M in range(1, ROW_SUBTILE):
i${M} = (const int8_t*) ((uintptr_t) i${M + ROW_TILE - ROW_SUBTILE} + input_increment);
$if M % 2 == 1:
if XNN_UNPREDICTABLE(rows < ${M+1}) {
i${M} = zero;
}
$else:
if XNN_UNPREDICTABLE(rows <= ${M}) {
i${M} = zero;
}
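// Load the requantization parameters: multiplier, shift, output zero point, and output min/max bounds.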
#if XNN_ARCH_ARM64
const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
#else
const int32x2_t vmultiplier = vld1_dup_s32(&params->neon.multiplier);
#endif
const int64x2_t vleft_shift = vld1q_dup_s64(&params->neon.left_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
$if CHANNEL_TILE > 8:
const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
$else:
const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
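// Produce outputs ${CHANNEL_TILE} channels at a time: add the last rows to the buffered sums, scale, add the output zero point, saturate to int8, and clamp.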
while (channels >= ${CHANNEL_TILE}) {
$for M in range(ROW_SUBTILE):
$for C in range(0, CHANNEL_TILE, 8):
const int8x8_t vi${M}x${ABC[C:C+8]} = vld1_s8(i${M}); i${M} += 8;
$for A in range(ACCUMULATORS):
$for C in range(0, CHANNEL_TILE, 8):
int16x8_t vacc${A}x${ABC[C:C+8]} = vaddl_s8(vi${A*2}x${ABC[C:C+8]}, vi${A*2+1}x${ABC[C:C+8]});
$for M in range(ACCUMULATORS * 2, ROW_SUBTILE):
$for C in range(0, CHANNEL_TILE, 8):
vacc${M % ACCUMULATORS}x${ABC[C:C+8]} = vaddw_s8(vacc${M % ACCUMULATORS}x${ABC[C:C+8]}, vi${M}x${ABC[C:C+8]});
$if ACCUMULATORS > 1:
// Add up all accumulators to vacc0x${ABC[0:CHANNEL_TILE]}
$ACC_SLICE = 1
$while ACC_SLICE < ACCUMULATORS:
$for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
$if A + ACC_SLICE < ACCUMULATORS:
$for C in range(0, CHANNEL_TILE, 8):
vacc${A}x${ABC[C:C+8]} = vaddq_s16(vacc${A}x${ABC[C:C+8]}, vacc${A + ACC_SLICE}x${ABC[C:C+8]});
$ACC_SLICE *= 2
$for C in range(0, CHANNEL_TILE, 4):
int32x4_t vacc${ABC[C:C+4]} = vld1q_s32(buffer); buffer += 4;
$for C in range(0, CHANNEL_TILE, 8):
vacc${ABC[C:C+4]} = vaddw_s16(vacc${ABC[C:C+4]}, vget_low_s16(vacc0x${ABC[C:C+8]}));
vacc${ABC[C+4:C+8]} = vaddw_s16(vacc${ABC[C+4:C+8]}, vget_high_s16(vacc0x${ABC[C:C+8]}));
$for C in range(0, CHANNEL_TILE, 4):
const int32x4_t vsgnacc${ABC[C:C+4]} = vreinterpretq_s32_u32(vcltq_s32(vacc${ABC[C:C+4]}, vmovq_n_s32(0)));
#if XNN_ARCH_ARM64
$for C in range(0, CHANNEL_TILE, 4):
const int64x2_t vprod${ABC[C:C+2]} = vmull_s32(vget_low_s32(vacc${ABC[C:C+4]}), vget_low_s32(vmultiplier));
const int64x2_t vprod${ABC[C+2:C+4]} = vmull_high_s32(vacc${ABC[C:C+4]}, vmultiplier);
$for C in range(0, CHANNEL_TILE, 4):
const int64x2_t vadjprod${ABC[C:C+2]} = vaddw_s32(vprod${ABC[C:C+2]}, vget_low_s32(vsgnacc${ABC[C:C+4]}));
const int64x2_t vadjprod${ABC[C+2:C+4]} = vaddw_high_s32(vprod${ABC[C+2:C+4]}, vsgnacc${ABC[C:C+4]});
#else
$for C in range(0, CHANNEL_TILE, 4):
const int64x2_t vprod${ABC[C:C+2]} = vmull_s32(vget_low_s32(vacc${ABC[C:C+4]}), vmultiplier);
const int64x2_t vprod${ABC[C+2:C+4]} = vmull_s32(vget_high_s32(vacc${ABC[C:C+4]}), vmultiplier);
$for C in range(0, CHANNEL_TILE, 4):
const int64x2_t vadjprod${ABC[C:C+2]} = vaddw_s32(vprod${ABC[C:C+2]}, vget_low_s32(vsgnacc${ABC[C:C+4]}));
const int64x2_t vadjprod${ABC[C+2:C+4]} = vaddw_s32(vprod${ABC[C+2:C+4]}, vget_high_s32(vsgnacc${ABC[C:C+4]}));
#endif
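// vadjprod holds the 64-bit product with a sign correction applied; the rounding shift below (vrshlq_s64 with a negative shift amount shifts right with rounding) completes the fixed-point scaling.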
$for C in range(0, CHANNEL_TILE, 2):
const int64x2_t vacc${ABC[C:C+2]} = vrshlq_s64(vadjprod${ABC[C:C+2]}, vleft_shift);
#if XNN_ARCH_ARM64
$for C in range(0, CHANNEL_TILE, 4):
vacc${ABC[C:C+4]} = vuzp1q_s32(vreinterpretq_s32_s64(vacc${ABC[C:C+2]}), vreinterpretq_s32_s64(vacc${ABC[C+2:C+4]}));
$for C in range(0, CHANNEL_TILE, 8):
const int16x8_t vacc${ABC[C:C+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${ABC[C:C+4]}), vacc${ABC[C+4:C+8]}), voutput_zero_point);
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
int8x16_t vout${ABC[C:C+16]} = vqmovn_high_s16(vqmovn_s16(vacc${ABC[C:C+8]}), vacc${ABC[C+8:C+16]});
$else:
int8x8_t vout${ABC[C:C+8]} = vqmovn_s16(vacc${ABC[C:C+8]});
#else
$for C in range(0, CHANNEL_TILE, 4):
vacc${ABC[C:C+4]} = vcombine_s32(vmovn_s64(vacc${ABC[C:C+2]}), vmovn_s64(vacc${ABC[C+2:C+4]}));
$for C in range(0, CHANNEL_TILE, 8):
const int16x8_t vacc${ABC[C:C+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${ABC[C:C+4]}), vqmovn_s32(vacc${ABC[C+4:C+8]})), voutput_zero_point);
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
int8x16_t vout${ABC[C:C+16]} = vcombine_s8(vqmovn_s16(vacc${ABC[C:C+8]}), vqmovn_s16(vacc${ABC[C+8:C+16]}));
$else:
int8x8_t vout${ABC[C:C+8]} = vqmovn_s16(vacc${ABC[C:C+8]});
#endif
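// Clamp the outputs to the [output_min, output_max] range.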
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
vout${ABC[C:C+16]} = vmaxq_s8(vout${ABC[C:C+16]}, voutput_min);
$elif CHANNEL_TILE > 8:
vout${ABC[C:C+8]} = vmax_s8(vout${ABC[C:C+8]}, vget_low_s8(voutput_min));
$else:
vout${ABC[C:C+8]} = vmax_s8(vout${ABC[C:C+8]}, voutput_min);
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
vout${ABC[C:C+16]} = vminq_s8(vout${ABC[C:C+16]}, voutput_max);
$elif CHANNEL_TILE > 8:
vout${ABC[C:C+8]} = vmin_s8(vout${ABC[C:C+8]}, vget_low_s8(voutput_max));
$else:
vout${ABC[C:C+8]} = vmin_s8(vout${ABC[C:C+8]}, voutput_max);
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
vst1q_s8(output, vout${ABC[C:C+16]}); output += 16;
$else:
vst1_s8(output, vout${ABC[C:C+8]}); output += 8;
channels -= ${CHANNEL_TILE};
}
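// Handle the remaining channels (fewer than ${CHANNEL_TILE}); the last, partial group of up to 7 channels is stored with 4-, 2-, and 1-element stores.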
if XNN_UNLIKELY(channels != 0) {
${"do " if CHANNEL_TILE > 8 else ""}{
$for M in range(ROW_SUBTILE):
$if CHANNEL_TILE > 8:
const int8x8_t vi${M}x${ABC[0:8]} = vld1_s8(i${M}); i${M} += 8;
$else:
const int8x8_t vi${M}x${ABC[0:8]} = vld1_s8(i${M});
$for A in range(ACCUMULATORS):
int16x8_t vacc${A}x${ABC[0:8]} = vaddl_s8(vi${A*2}x${ABC[0:8]}, vi${A*2+1}x${ABC[0:8]});
$for M in range(ACCUMULATORS * 2, ROW_SUBTILE):
vacc${M % ACCUMULATORS}x${ABC[0:8]} = vaddw_s8(vacc${M % ACCUMULATORS}x${ABC[0:8]}, vi${M}x${ABC[0:8]});
$if ACCUMULATORS > 1:
// Add up all accumulators to vacc0x${ABC[0:8]}
$ACC_SLICE = 1
$while ACC_SLICE < ACCUMULATORS:
$for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
$if A + ACC_SLICE < ACCUMULATORS:
vacc${A}x${ABC[0:8]} = vaddq_s16(vacc${A}x${ABC[0:8]}, vacc${A + ACC_SLICE}x${ABC[0:8]});
$ACC_SLICE *= 2
int32x4_t vacc${ABC[0:4]} = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc${ABC[4:8]} = vld1q_s32(buffer); buffer += 4;
vacc${ABC[0:4]} = vaddw_s16(vacc${ABC[0:4]}, vget_low_s16(vacc0x${ABC[0:8]}));
vacc${ABC[4:8]} = vaddw_s16(vacc${ABC[4:8]}, vget_high_s16(vacc0x${ABC[0:8]}));
const int32x4_t vsgnacc${ABC[0:4]} = vreinterpretq_s32_u32(vcltq_s32(vacc${ABC[0:4]}, vmovq_n_s32(0)));
const int32x4_t vsgnacc${ABC[4:8]} = vreinterpretq_s32_u32(vcltq_s32(vacc${ABC[4:8]}, vmovq_n_s32(0)));
#if XNN_ARCH_ARM64
const int64x2_t vprod${ABC[0:2]} = vmull_s32(vget_low_s32(vacc${ABC[0:4]}), vget_low_s32(vmultiplier));
const int64x2_t vprod${ABC[2:4]} = vmull_high_s32(vacc${ABC[0:4]}, vmultiplier);
const int64x2_t vprod${ABC[4:6]} = vmull_s32(vget_low_s32(vacc${ABC[4:8]}), vget_low_s32(vmultiplier));
const int64x2_t vprod${ABC[6:8]} = vmull_high_s32(vacc${ABC[4:8]}, vmultiplier);
const int64x2_t vadjprod${ABC[0:2]} = vaddw_s32(vprod${ABC[0:2]}, vget_low_s32(vsgnacc${ABC[0:4]}));
const int64x2_t vadjprod${ABC[2:4]} = vaddw_high_s32(vprod${ABC[2:4]}, vsgnacc${ABC[0:4]});
const int64x2_t vadjprod${ABC[4:6]} = vaddw_s32(vprod${ABC[4:6]}, vget_low_s32(vsgnacc${ABC[4:8]}));
const int64x2_t vadjprod${ABC[6:8]} = vaddw_high_s32(vprod${ABC[6:8]}, vsgnacc${ABC[4:8]});
#else
const int64x2_t vprod${ABC[0:2]} = vmull_s32(vget_low_s32(vacc${ABC[0:4]}), vmultiplier);
const int64x2_t vprod${ABC[2:4]} = vmull_s32(vget_high_s32(vacc${ABC[0:4]}), vmultiplier);
const int64x2_t vprod${ABC[4:6]} = vmull_s32(vget_low_s32(vacc${ABC[4:8]}), vmultiplier);
const int64x2_t vprod${ABC[6:8]} = vmull_s32(vget_high_s32(vacc${ABC[4:8]}), vmultiplier);
const int64x2_t vadjprod${ABC[0:2]} = vaddw_s32(vprod${ABC[0:2]}, vget_low_s32(vsgnacc${ABC[0:4]}));
const int64x2_t vadjprod${ABC[2:4]} = vaddw_s32(vprod${ABC[2:4]}, vget_high_s32(vsgnacc${ABC[0:4]}));
const int64x2_t vadjprod${ABC[4:6]} = vaddw_s32(vprod${ABC[4:6]}, vget_low_s32(vsgnacc${ABC[4:8]}));
const int64x2_t vadjprod${ABC[6:8]} = vaddw_s32(vprod${ABC[6:8]}, vget_high_s32(vsgnacc${ABC[4:8]}));
#endif
const int64x2_t vacc${ABC[0:2]} = vrshlq_s64(vadjprod${ABC[0:2]}, vleft_shift);
const int64x2_t vacc${ABC[2:4]} = vrshlq_s64(vadjprod${ABC[2:4]}, vleft_shift);
const int64x2_t vacc${ABC[4:6]} = vrshlq_s64(vadjprod${ABC[4:6]}, vleft_shift);
const int64x2_t vacc${ABC[6:8]} = vrshlq_s64(vadjprod${ABC[6:8]}, vleft_shift);
#if XNN_ARCH_ARM64
vacc${ABC[0:4]} = vuzp1q_s32(vreinterpretq_s32_s64(vacc${ABC[0:2]}), vreinterpretq_s32_s64(vacc${ABC[2:4]}));
vacc${ABC[4:8]} = vuzp1q_s32(vreinterpretq_s32_s64(vacc${ABC[4:6]}), vreinterpretq_s32_s64(vacc${ABC[6:8]}));
const int16x8_t vacc${ABC[0:8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${ABC[0:4]}), vacc${ABC[4:8]}), voutput_zero_point);
int8x8_t vout${ABC[0:8]} = vqmovn_s16(vacc${ABC[0:8]});
#else
vacc${ABC[0:4]} = vcombine_s32(vmovn_s64(vacc${ABC[0:2]}), vmovn_s64(vacc${ABC[2:4]}));
vacc${ABC[4:8]} = vcombine_s32(vmovn_s64(vacc${ABC[4:6]}), vmovn_s64(vacc${ABC[6:8]}));
const int16x8_t vacc${ABC[0:8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${ABC[0:4]}), vqmovn_s32(vacc${ABC[4:8]})), voutput_zero_point);
int8x8_t vout${ABC[0:8]} = vqmovn_s16(vacc${ABC[0:8]});
#endif
$if CHANNEL_TILE > 8:
vout${ABC[0:8]} = vmax_s8(vout${ABC[0:8]}, vget_low_s8(voutput_min));
vout${ABC[0:8]} = vmin_s8(vout${ABC[0:8]}, vget_low_s8(voutput_max));
$else:
vout${ABC[0:8]} = vmax_s8(vout${ABC[0:8]}, voutput_min);
vout${ABC[0:8]} = vmin_s8(vout${ABC[0:8]}, voutput_max);
$if CHANNEL_TILE > 8:
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout${ABC[0:8]}); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_s8(vout${ABC[0:8]}), 0); output += 4;
vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
}
if (channels & 2) {
vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_s8(vout${ABC[0:8]}), 0); output += 2;
vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout${ABC[0:8]}, 0); output += 1;
}
channels = 0;
}
$else:
if (channels & 4) {
vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_s8(vout${ABC[0:8]}), 0); output += 4;
vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
}
if (channels & 2) {
vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_s8(vout${ABC[0:8]}), 0); output += 2;
vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout${ABC[0:8]}, 0);
}
}${" while (channels != 0);" if CHANNEL_TILE > 8 else ""}
}
}