// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert ELEMENTS_TILE % 4 == 0
$assert ELEMENTS_TILE >= 4
$SIMD_TILE = ELEMENTS_TILE // 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
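// Note: both vfmaq_f32 (fused) and vmlaq_f32 (unfused) compute a + b * c from arguments (a, b, c); the
// VMULADDQ_F32 template parameter only selects which of the two multiply-add intrinsics is emitted.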
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
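
// Entries of xnn_table_exp2_k_over_64 are 2**(k / 64) for k = 0..63; all values lie in [1.0, 2.0).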
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];

void xnn_f32_raddstoreexpminusmax_ukernel__${"neonfma" if FMA else "neon"}_lut64_p2_x${ELEMENTS_TILE}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
    size_t elements,
    const float* input,
    float* output,
    float* sum,
    float max) XNN_DISABLE_TSAN
{
  assert(elements % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p23f);
  // The smallest x for which expf(x) is normalized.
  const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep6f);
  const float32x4_t vlog2e_x64 = vmovq_n_f32(0x1.715476p6f);
  $if FMA:
    const float32x4_t vminus_ln2_o64_hi = vmovq_n_f32(-0x1.62e43p-7f);
    const float32x4_t vminus_ln2_o64_lo = vmovq_n_f32(0x1.05c61p-35f);
  $else:
    // Last 13 bits are zeroes
    const float32x4_t vminus_ln2_o64_hi = vmovq_n_f32(-0x1.630000p-7f);
    const float32x4_t vminus_ln2_o64_lo = vmovq_n_f32(0x1.BD0106p-19f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFF0Ap-2f);

  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));

  const float32x4_t vi_max = vdupq_n_f32(max);

  $if ELEMENTS_TILE > 4:
    $for K in range(ACCUMULATORS):
      float32x4_t vacc${K} = vmovq_n_f32(0.0f);
    for (; elements >= ${ELEMENTS_TILE} * sizeof(float); elements -= ${ELEMENTS_TILE} * sizeof(float)) {
      // Load ${ELEMENTS_TILE} (${SIMD_TILE}x4) inputs at a time.
      $for N in range(0, ELEMENTS_TILE, 4):
        const float32x4_t vi${ABC[N:N+4]} = vld1q_f32(input); input += 4;

      // Subtract maximum input x := i - i_max. This implies x <= 0.
      $for N in range(0, ELEMENTS_TILE, 4):
        const float32x4_t vx${ABC[N:N+4]} = vsubq_f32(vi${ABC[N:N+4]}, vi_max);

      // Compute reduced argument n := round(x * 64 / log(2)).
      // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
      // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
      // FMA instruction. The trick of adding a large number is valid only within certain bounds
      // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because
      // inputs outside of [-87.336540, 0.0] result in denormalized or underflown expf(x). We fix up the result for
      // such inputs at the very end of the algorithm.
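      // Worked example (illustrative): x = -1.0f gives x * 64 / log(2) ~= -92.332; adding the magic bias
      // (0x1.800000p23f = 12582912.0f) yields 12582819.668, which rounds to 12582820.0f because binary32 values of
      // this magnitude are spaced 1.0 apart; subtracting the bias back later recovers n = -92.0f = round(-92.332).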
      $for N in range(0, ELEMENTS_TILE, 4):
        float32x4_t vn${ABC[N:N+4]} = ${VMULADDQ_F32}(vmagic_bias, vx${ABC[N:N+4]}, vlog2e_x64);

      // Create a floating-point number s (scale) such that s := 2**(n / 64) for inputs for which expf(x) is
      // normalized, i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split s == 2**(n / 64) =
      // 2**e * 2**(n / 64 - e), where e := int(n / 64). We create s in two steps:
      // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
      //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
      // 2. Adjust the fetched value by adding e to its floating-point exponent. The result is always a normalized
      //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
      //    and thus the adjusted exponent is not lower than -126.
      //
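      // Continuing the worked example: n = -92 splits as e = -2 and n % 64 = 36, so
      // s = 2**-2 * xnn_table_exp2_k_over_64[36] = 0.25f * 2**(36 / 64) ~= 0.3692f.
      //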
      // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
      $for N in range(0, ELEMENTS_TILE, 4):
        const int32x4_t ve${ABC[N:N+4]} = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), vmovq_n_s32(INT32_C(0x3F))), 17);

      // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
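      // Each 64-bit lane of vidx packs two 32-bit indices: the low half indexes one element's table entry and the
      // high half (extracted with ">> 32") the next element's entry.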
      $for N in range(0, ELEMENTS_TILE, 4):
        const uint64x2_t vidx${ABC[N:N+4]} = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), vindex_mask));
        const uint64_t vidx${ABC[N:N+2]} = vgetq_lane_u64(vidx${ABC[N:N+4]}, 0);
        const uint64_t vidx${ABC[N+2:N+4]} = vgetq_lane_u64(vidx${ABC[N:N+4]}, 1);
      $for N in range(0, ELEMENTS_TILE, 4):
        float32x2_t vl${ABC[N:N+2]} = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx${ABC[N:N+2]}]);
        float32x2_t vl${ABC[N+2:N+4]} = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx${ABC[N+2:N+4]}]);
      $for N in range(0, ELEMENTS_TILE, 4):
        vl${ABC[N:N+2]} = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx${ABC[N:N+2]} >> 32)], vl${ABC[N:N+2]}, 1);
        vl${ABC[N+2:N+4]} = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx${ABC[N+2:N+4]} >> 32)], vl${ABC[N+2:N+4]}, 1);
        const float32x4_t vl${ABC[N:N+4]} = vcombine_f32(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});

      // Adjust exponent of the value l fetched from the table to get the final s value.
      $for N in range(0, ELEMENTS_TILE, 4):
        const float32x4_t vs${ABC[N:N+4]} = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl${ABC[N:N+4]}), ve${ABC[N:N+4]}));

      // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
      $for N in range(0, ELEMENTS_TILE, 4):
        vn${ABC[N:N+4]} = vsubq_f32(vn${ABC[N:N+4]}, vmagic_bias);

      // Compute reduced argument t := x - n * log(2) / 64.
      // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
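      // The hi constant carries the leading bits of -log(2) / 64 and the lo constant supplies the remaining bits,
      // so the two multiply-adds below subtract n * log(2) / 64 from x with (almost) no extra rounding error.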
      $for N in range(0, ELEMENTS_TILE, 4):
        float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vx${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_o64_hi);

      $for N in range(0, ELEMENTS_TILE, 4):
        vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_o64_lo);

      // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
      $for N in range(0, ELEMENTS_TILE, 4):
        float32x4_t vp${ABC[N:N+4]} = vmulq_f32(vt${ABC[N:N+4]}, vc2);

      $for N in range(0, ELEMENTS_TILE, 4):
        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vt${ABC[N:N+4]}, vp${ABC[N:N+4]});

      // Reconstruct the final f value:
      //   f = s * (1 + t * (1 + t * c2))
      //     = s * (1 + t + t * (t * c2))
      //     = s + s * (t + t * (t * c2))
      //     = s + s * p
      $for N in range(0, ELEMENTS_TILE, 4):
        float32x4_t vf${ABC[N:N+4]} = ${VMULADDQ_F32}(vs${ABC[N:N+4]}, vs${ABC[N:N+4]}, vp${ABC[N:N+4]});

      // For inputs below denormal cutoff, replace output with +0.0f.
      // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
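      // (vcltq_f32 sets all bits of a lane where x < vdenorm_cutoff; vbicq_u32 then clears exactly those lanes.)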
      $for N in range(0, ELEMENTS_TILE, 4):
        vf${ABC[N:N+4]} = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf${ABC[N:N+4]}), vcltq_f32(vx${ABC[N:N+4]}, vdenorm_cutoff)));

      // Store ${ELEMENTS_TILE} (${SIMD_TILE}x4) outputs at a time.
      $for N in range(0, ELEMENTS_TILE, 4):
        vst1q_f32(output, vf${ABC[N:N+4]}); output += 4;

      // Accumulate computed exponents.
      $for N in range(0, ELEMENTS_TILE, 4):
        vacc${N % ACCUMULATORS} = vaddq_f32(vacc${N % ACCUMULATORS}, vf${ABC[N:N+4]});
    }
    $if ACCUMULATORS > 1:
      // Add up all accumulators to vacc0.
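      // Pairwise tree reduction: e.g. with ACCUMULATORS == 4 the template below emits
      // vacc0 += vacc1; vacc2 += vacc3; vacc0 += vacc2;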
      $ACC_SLICE = 1
      $while ACC_SLICE < ACCUMULATORS:
        $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
          $if A + ACC_SLICE < ACCUMULATORS:
            vacc${A} = vaddq_f32(vacc${A}, vacc${A + ACC_SLICE});
        $ACC_SLICE *= 2
    float32x4_t vacc = vacc0;
  $else:
    float32x4_t vacc = vmovq_n_f32(0.0f);

  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const float32x4_t vi = vld1q_f32(input); input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float32x4_t vx = vsubq_f32(vi, vi_max);

    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick of adding a large number is valid only within certain bounds
    // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because
    // inputs outside of [-87.336540, 0.0] result in denormalized or underflown expf(x). We fix up the result for
    // such inputs at the very end of the algorithm.
    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vx, vlog2e_x64);

    // Create a floating-point number s (scale) such that s := 2**(n / 64) for inputs for which expf(x) is
    // normalized, i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split s == 2**(n / 64) =
    // 2**e * 2**(n / 64 - e), where e := int(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by adding e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float32x4_t vt = ${VMULADDQ_F32}(vx, vn, vminus_ln2_o64_hi);
    vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_o64_lo);

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = ${VMULADDQ_F32}(vt, vt, vp);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float32x4_t vf = ${VMULADDQ_F32}(vs, vs, vp);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    // Store 4 outputs at a time.
    vst1q_f32(output, vf); output += 4;

    // Accumulate computed exponents.
    vacc = vaddq_f32(vacc, vf);
  }
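
  // On AArch64, vaddvq_f32 reduces all four lanes to a scalar right away; on AArch32 the running sum is kept as
  // two partial sums in a 64-bit register and fully reduced only at the very end.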
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif

  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
    // Load 4 inputs at a time (between 1 and 3 of them hold valid elements here).
    const float32x4_t vi = vld1q_f32(input); input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float32x4_t vx = vsubq_f32(vi, vi_max);

    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick of adding a large number is valid only within certain bounds
    // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because
    // inputs outside of [-87.336540, 0.0] result in denormalized or underflown expf(x). We fix up the result for
    // such inputs at the very end of the algorithm.
    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vx, vlog2e_x64);

    // Create a floating-point number s (scale) such that s := 2**(n / 64) for inputs for which expf(x) is
    // normalized, i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split s == 2**(n / 64) =
    // 2**e * 2**(n / 64 - e), where e := int(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by adding e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float32x4_t vt = ${VMULADDQ_F32}(vx, vn, vminus_ln2_o64_hi);
    vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_o64_lo);

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = ${VMULADDQ_F32}(vt, vt, vp);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float32x4_t vf = ${VMULADDQ_F32}(vs, vs, vp);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (elements & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      vst1_f32(output, vf_lo); output += 2;

      // Accumulate 2 computed exponents.
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif

      vf_lo = vget_high_f32(vf);
    }
    if (elements & (1 * sizeof(float))) {
      // Store 1 output at a time.
      vst1_lane_f32(output, vf_lo, 0);

      // Accumulate 1 computed exponent.
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
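        // Shift the single valid lane into the upper half of the 64-bit register, zeroing the lower half, so that
        // only this one element is added to the running sum.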
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }

  // Reduce the remaining partial sums and store the total to *sum.
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}