// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>

void xnn_f32_vrndz_ukernel__wasmsimd_cvt_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const v128_t vsign_mask = wasm_f32x4_splat(-0.0f);
  const v128_t vmagic_number = wasm_f32x4_splat(0x1.000000p+23f);
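  // Round toward zero by converting each lane to a signed 32-bit integer (which truncates)
  // and back to float. Lanes with |x| >= 2**23 are already integral and could overflow the
  // integer conversion, so the |x| < 2**23 mask keeps the original value in those lanes
  // (and for NaN, where the comparison is false). The sign bit is always taken from the
  // input, so -0.0f and negative values that truncate to zero keep their sign.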
  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vintx${ABC[N:N+4]} = wasm_i32x4_trunc_saturate_f32x4(vx${ABC[N:N+4]});
        const v128_t vabsx${ABC[N:N+4]} = wasm_f32x4_abs(vx${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vrndx${ABC[N:N+4]} = wasm_f32x4_convert_i32x4(vintx${ABC[N:N+4]});
        const v128_t vrndmask${ABC[N:N+4]} = wasm_v128_andnot(wasm_f32x4_lt(vabsx${ABC[N:N+4]}, vmagic_number), vsign_mask);

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vy${ABC[N:N+4]} = wasm_v128_bitselect(vrndx${ABC[N:N+4]}, vx${ABC[N:N+4]}, vrndmask${ABC[N:N+4]});

      wasm_v128_store(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
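  // Process remaining full vectors of 4 elements (this is the main loop when BATCH_TILE == 4).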
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vintx = wasm_i32x4_trunc_saturate_f32x4(vx);
    const v128_t vabsx = wasm_f32x4_abs(vx);
    const v128_t vrndx = wasm_f32x4_convert_i32x4(vintx);
    const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask);
    const v128_t vy = wasm_v128_bitselect(vrndx, vx, vrndmask);

    wasm_v128_store(y, vy);
    y += 4;
  }
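  // Tail: 1-3 leftover elements. A full 16-byte vector is still loaded and rounded;
  // only the valid low lanes are stored, two lanes via a 64-bit store and/or one lane
  // via a 32-bit store.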
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vintx = wasm_i32x4_trunc_saturate_f32x4(vx);
    const v128_t vabsx = wasm_f32x4_abs(vx);
    const v128_t vrndx = wasm_f32x4_convert_i32x4(vintx);
    const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask);
    v128_t vy = wasm_v128_bitselect(vrndx, vx, vrndmask);

    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}