// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["RNDNE", "RNDZ", "RNDU", "RNDD"]
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
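// Select the SSE4.1 rounding-control flag that corresponds to the requested rounding mode.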
$_MM_FROUND_TO_FLAG = {
$ "RNDNE": "_MM_FROUND_TO_NEAREST_INT",
$ "RNDZ": "_MM_FROUND_TO_ZERO",
$ "RNDU": "_MM_FROUND_TO_POS_INF",
$ "RNDD": "_MM_FROUND_TO_NEG_INF",
$}[OP]
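// SSE4.1 vector rounding microkernel: rounds each element of x with the ${OP}
// rounding mode (ROUNDPS / _mm_round_ps) and writes the results to y,
// processing ${BATCH_TILE} elements per main-loop iteration.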
void xnn_f32_v${OP.lower()}_ukernel__sse41_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
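  // Main loop: round ${BATCH_TILE} elements per iteration with unrolled loads, ROUNDPS, and stores.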
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
    $for N in range(4, BATCH_TILE, 4):
      const __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 4):
      const __m128 vy${ABC[N:N+4]} = _mm_round_ps(vx${ABC[N:N+4]}, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

    _mm_storeu_ps(y, vy${ABC[0:4]});
    $for N in range(4, BATCH_TILE, 4):
      _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 4:
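    // Secondary loop: process remaining full groups of 4 elements.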
    for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
      const __m128 vx = _mm_loadu_ps(x);
      x += 4;

      const __m128 vy = _mm_round_ps(vx, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

      _mm_storeu_ps(y, vy);
      y += 4;
    }
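  // Handle the final 1-3 elements: round a full vector, then store only the low
  // two lanes and/or the lowest lane, as indicated by the leftover byte count.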
  if XNN_UNLIKELY(n != 0) {
    const __m128 vx = _mm_loadu_ps(x);
    __m128 vy = _mm_round_ps(vx, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy);
    }
  }
}