blob: 4ae2bb9fb9df6fec80343d973d0e81cfc3774eb7 [file] [log] [blame]
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert PIXEL_TILE == 1
$ABC = "0123456789ABCDEFGHIJKLMN"
$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/bilinear.h>
// F32 bilinear (2x2) interpolation micro-kernel, NEON / NEONFMA variants.
//
// NOTE(review): this is a generator template, not plain C — lines starting
// with '$' are template directives and '${...}' are substitutions; the
// expanded file is ordinary C with NEON intrinsics.
//
// Per output pixel the kernel reads 4 corner-row pointers from `input`
// (top-left, top-right, bottom-left, bottom-right) and 2 interpolation
// weights from `weights` (horizontal alpha in lane 0, vertical alpha in
// lane 1), then computes, per channel:
//   top    = tl + alphah * (tr - tl)
//   bottom = bl + alphah * (br - bl)
//   out    = top + alphav * (bottom - top)
//
// `channels` and `input_offset` are byte quantities; `channels` must be a
// non-zero multiple of sizeof(float).  `output_increment` is the byte
// stride added to `output` after each pixel's `channels` bytes are written.
void xnn_f32_bilinear_ukernel__${"neonfma" if FMA else "neon"}_c${CHANNEL_TILE}${"" if PIXEL_TILE == 1 else "x%d" % PIXEL_TILE}(
size_t output_pixels,
size_t channels,
const float**restrict input,
size_t input_offset,
const float*restrict weights,
float*restrict output,
size_t output_increment)
{
assert(output_pixels != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
do {
// Corner pointers for this pixel: i0 = top-left row, i1 = top-right,
// i2 = bottom-left, i3 = bottom-right; each advanced by the common
// byte offset into the row.
const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset);
const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset);
const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset);
const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset);
input += 4;
// Load both weights as a 2-lane vector: lane 0 = alphah, lane 1 = alphav.
const float32x2_t valphahv = vld1_f32(weights); weights += 2;
$if FMA:
// AArch32 FMA path only: pre-broadcast each weight to a full 4-lane
// vector so the plain vfmaq_f32 form can be used below; other targets
// use the _lane_ intrinsic form on valphahv directly.
#if XNN_ARCH_ARM
const float32x4_t valphah = vdupq_lane_f32(valphahv, 0);
const float32x4_t valphav = vdupq_lane_f32(valphahv, 1);
#endif
size_t c = channels;
// Main loop: CHANNEL_TILE channels per iteration, in groups of 4 lanes.
for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vtl${ABC[C:C+4]} = vld1q_f32(i0); i0 += 4;
const float32x4_t vtr${ABC[C:C+4]} = vld1q_f32(i1); i1 += 4;
const float32x4_t vbl${ABC[C:C+4]} = vld1q_f32(i2); i2 += 4;
const float32x4_t vbr${ABC[C:C+4]} = vld1q_f32(i3); i3 += 4;
// Horizontal deltas: td = tr - tl, bd = br - bl.
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vtd${ABC[C:C+4]} = vsubq_f32(vtr${ABC[C:C+4]}, vtl${ABC[C:C+4]});
const float32x4_t vbd${ABC[C:C+4]} = vsubq_f32(vbr${ABC[C:C+4]}, vbl${ABC[C:C+4]});
// Horizontal interpolation: t = tl + alphah*td, b = bl + alphah*bd.
$if FMA:
#if XNN_ARCH_ARM
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vt${ABC[C:C+4]} = vfmaq_f32(vtl${ABC[C:C+4]}, vtd${ABC[C:C+4]}, valphah);
const float32x4_t vb${ABC[C:C+4]} = vfmaq_f32(vbl${ABC[C:C+4]}, vbd${ABC[C:C+4]}, valphah);
#else
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vt${ABC[C:C+4]} = vfmaq_lane_f32(vtl${ABC[C:C+4]}, vtd${ABC[C:C+4]}, valphahv, 0);
const float32x4_t vb${ABC[C:C+4]} = vfmaq_lane_f32(vbl${ABC[C:C+4]}, vbd${ABC[C:C+4]}, valphahv, 0);
#endif
$else:
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vt${ABC[C:C+4]} = vmlaq_lane_f32(vtl${ABC[C:C+4]}, vtd${ABC[C:C+4]}, valphahv, 0);
const float32x4_t vb${ABC[C:C+4]} = vmlaq_lane_f32(vbl${ABC[C:C+4]}, vbd${ABC[C:C+4]}, valphahv, 0);
// Vertical interpolation: o = t + alphav*(b - t).
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vd${ABC[C:C+4]} = vsubq_f32(vb${ABC[C:C+4]}, vt${ABC[C:C+4]});
$if FMA:
#if XNN_ARCH_ARM
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vo${ABC[C:C+4]} = vfmaq_f32(vt${ABC[C:C+4]}, vd${ABC[C:C+4]}, valphav);
#else
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vo${ABC[C:C+4]} = vfmaq_lane_f32(vt${ABC[C:C+4]}, vd${ABC[C:C+4]}, valphahv, 1);
#endif
$else:
$for C in range(0, CHANNEL_TILE, 4):
const float32x4_t vo${ABC[C:C+4]} = vmlaq_lane_f32(vt${ABC[C:C+4]}, vd${ABC[C:C+4]}, valphahv, 1);
$for C in range(0, CHANNEL_TILE, 4):
vst1q_f32(output, vo${ABC[C:C+4]}); output += 4;
}
// 4-channel tail loop: only generated when the main tile is wider than 4.
$if CHANNEL_TILE > 4:
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123);
const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123);
$if FMA:
#if XNN_ARCH_ARM
const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah);
const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah);
#else
const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0);
const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0);
#endif
$else:
const float32x4_t vt0123 = vmlaq_lane_f32(vtl0123, vtd0123, valphahv, 0);
const float32x4_t vb0123 = vmlaq_lane_f32(vbl0123, vbd0123, valphahv, 0);
const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123);
$if FMA:
#if XNN_ARCH_ARM
const float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav);
#else
const float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1);
#endif
$else:
const float32x4_t vo0123 = vmlaq_lane_f32(vt0123, vd0123, valphahv, 1);
vst1q_f32(output, vo0123);
output += 4;
}
// Remainder: 1-3 channels left.
// NOTE(review): full 4-lane loads are issued even though fewer than 4
// floats remain; this presumes the caller guarantees readable padding
// past the end of each input row — confirm against the op setup code.
if XNN_UNLIKELY(c != 0) {
const float32x4_t vtl0123 = vld1q_f32(i0);
const float32x4_t vtr0123 = vld1q_f32(i1);
const float32x4_t vbl0123 = vld1q_f32(i2);
const float32x4_t vbr0123 = vld1q_f32(i3);
const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123);
const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123);
$if FMA:
#if XNN_ARCH_ARM
const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah);
const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah);
#else
const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0);
const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0);
#endif
$else:
const float32x4_t vt0123 = vmlaq_lane_f32(vtl0123, vtd0123, valphahv, 0);
const float32x4_t vb0123 = vmlaq_lane_f32(vbl0123, vbd0123, valphahv, 0);
const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123);
$if FMA:
#if XNN_ARCH_ARM
float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav);
#else
float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1);
#endif
$else:
const float32x4_t vo0123 = vmlaq_lane_f32(vt0123, vd0123, valphahv, 1);
// Store the valid 1-3 lanes only: 2 lanes if bit 1 of the remaining
// byte count is set, then 1 more lane if bit 0 (in float units) is set.
float32x2_t vo01 = vget_low_f32(vo0123);
if (c & (2 * sizeof(float))) {
vst1_f32(output, vo01); output += 2;
vo01 = vget_high_f32(vo0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(output, vo01, 0); output += 1;
}
}
// Advance to the next pixel's output location (byte stride).
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}