/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __ARM_NEON_H
#define __ARM_NEON_H
#if !defined(__ARM_NEON)
#error "NEON support not enabled"
#endif
#include <stdint.h>
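/* Scalar element type aliases. float16_t aliases the compiler's
 * half-precision __fp16 type; float64_t exists only on AArch64. */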
typedef float float32_t;
typedef __fp16 float16_t;
#ifdef __aarch64__
typedef double float64_t;
#endif
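/* Polynomial element types. The AArch64 definitions use unsigned storage
 * types (and add the wider poly64_t/poly128_t), while the legacy 32-bit
 * definitions below use signed storage types. */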
#ifdef __aarch64__
typedef uint8_t poly8_t;
typedef uint16_t poly16_t;
typedef uint64_t poly64_t;
typedef __uint128_t poly128_t;
#else
typedef int8_t poly8_t;
typedef int16_t poly16_t;
#endif
typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
#ifdef __aarch64__
typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
#endif
typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
#ifdef __aarch64__
typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
#endif
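/* Structures holding 2, 3 or 4 vectors of the same type, as returned and
 * consumed by the de-interleaving load/store intrinsics (vld2/vld3/vld4
 * and vst2/vst3/vst4, defined later in the full header). */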
typedef struct int8x8x2_t {
int8x8_t val[2];
} int8x8x2_t;
typedef struct int8x16x2_t {
int8x16_t val[2];
} int8x16x2_t;
typedef struct int16x4x2_t {
int16x4_t val[2];
} int16x4x2_t;
typedef struct int16x8x2_t {
int16x8_t val[2];
} int16x8x2_t;
typedef struct int32x2x2_t {
int32x2_t val[2];
} int32x2x2_t;
typedef struct int32x4x2_t {
int32x4_t val[2];
} int32x4x2_t;
typedef struct int64x1x2_t {
int64x1_t val[2];
} int64x1x2_t;
typedef struct int64x2x2_t {
int64x2_t val[2];
} int64x2x2_t;
typedef struct uint8x8x2_t {
uint8x8_t val[2];
} uint8x8x2_t;
typedef struct uint8x16x2_t {
uint8x16_t val[2];
} uint8x16x2_t;
typedef struct uint16x4x2_t {
uint16x4_t val[2];
} uint16x4x2_t;
typedef struct uint16x8x2_t {
uint16x8_t val[2];
} uint16x8x2_t;
typedef struct uint32x2x2_t {
uint32x2_t val[2];
} uint32x2x2_t;
typedef struct uint32x4x2_t {
uint32x4_t val[2];
} uint32x4x2_t;
typedef struct uint64x1x2_t {
uint64x1_t val[2];
} uint64x1x2_t;
typedef struct uint64x2x2_t {
uint64x2_t val[2];
} uint64x2x2_t;
typedef struct float16x4x2_t {
float16x4_t val[2];
} float16x4x2_t;
typedef struct float16x8x2_t {
float16x8_t val[2];
} float16x8x2_t;
typedef struct float32x2x2_t {
float32x2_t val[2];
} float32x2x2_t;
typedef struct float32x4x2_t {
float32x4_t val[2];
} float32x4x2_t;
#ifdef __aarch64__
typedef struct float64x1x2_t {
float64x1_t val[2];
} float64x1x2_t;
typedef struct float64x2x2_t {
float64x2_t val[2];
} float64x2x2_t;
#endif
typedef struct poly8x8x2_t {
poly8x8_t val[2];
} poly8x8x2_t;
typedef struct poly8x16x2_t {
poly8x16_t val[2];
} poly8x16x2_t;
typedef struct poly16x4x2_t {
poly16x4_t val[2];
} poly16x4x2_t;
typedef struct poly16x8x2_t {
poly16x8_t val[2];
} poly16x8x2_t;
#ifdef __aarch64__
typedef struct poly64x1x2_t {
poly64x1_t val[2];
} poly64x1x2_t;
typedef struct poly64x2x2_t {
poly64x2_t val[2];
} poly64x2x2_t;
#endif
typedef struct int8x8x3_t {
int8x8_t val[3];
} int8x8x3_t;
typedef struct int8x16x3_t {
int8x16_t val[3];
} int8x16x3_t;
typedef struct int16x4x3_t {
int16x4_t val[3];
} int16x4x3_t;
typedef struct int16x8x3_t {
int16x8_t val[3];
} int16x8x3_t;
typedef struct int32x2x3_t {
int32x2_t val[3];
} int32x2x3_t;
typedef struct int32x4x3_t {
int32x4_t val[3];
} int32x4x3_t;
typedef struct int64x1x3_t {
int64x1_t val[3];
} int64x1x3_t;
typedef struct int64x2x3_t {
int64x2_t val[3];
} int64x2x3_t;
typedef struct uint8x8x3_t {
uint8x8_t val[3];
} uint8x8x3_t;
typedef struct uint8x16x3_t {
uint8x16_t val[3];
} uint8x16x3_t;
typedef struct uint16x4x3_t {
uint16x4_t val[3];
} uint16x4x3_t;
typedef struct uint16x8x3_t {
uint16x8_t val[3];
} uint16x8x3_t;
typedef struct uint32x2x3_t {
uint32x2_t val[3];
} uint32x2x3_t;
typedef struct uint32x4x3_t {
uint32x4_t val[3];
} uint32x4x3_t;
typedef struct uint64x1x3_t {
uint64x1_t val[3];
} uint64x1x3_t;
typedef struct uint64x2x3_t {
uint64x2_t val[3];
} uint64x2x3_t;
typedef struct float16x4x3_t {
float16x4_t val[3];
} float16x4x3_t;
typedef struct float16x8x3_t {
float16x8_t val[3];
} float16x8x3_t;
typedef struct float32x2x3_t {
float32x2_t val[3];
} float32x2x3_t;
typedef struct float32x4x3_t {
float32x4_t val[3];
} float32x4x3_t;
#ifdef __aarch64__
typedef struct float64x1x3_t {
float64x1_t val[3];
} float64x1x3_t;
typedef struct float64x2x3_t {
float64x2_t val[3];
} float64x2x3_t;
#endif
typedef struct poly8x8x3_t {
poly8x8_t val[3];
} poly8x8x3_t;
typedef struct poly8x16x3_t {
poly8x16_t val[3];
} poly8x16x3_t;
typedef struct poly16x4x3_t {
poly16x4_t val[3];
} poly16x4x3_t;
typedef struct poly16x8x3_t {
poly16x8_t val[3];
} poly16x8x3_t;
#ifdef __aarch64__
typedef struct poly64x1x3_t {
poly64x1_t val[3];
} poly64x1x3_t;
typedef struct poly64x2x3_t {
poly64x2_t val[3];
} poly64x2x3_t;
#endif
typedef struct int8x8x4_t {
int8x8_t val[4];
} int8x8x4_t;
typedef struct int8x16x4_t {
int8x16_t val[4];
} int8x16x4_t;
typedef struct int16x4x4_t {
int16x4_t val[4];
} int16x4x4_t;
typedef struct int16x8x4_t {
int16x8_t val[4];
} int16x8x4_t;
typedef struct int32x2x4_t {
int32x2_t val[4];
} int32x2x4_t;
typedef struct int32x4x4_t {
int32x4_t val[4];
} int32x4x4_t;
typedef struct int64x1x4_t {
int64x1_t val[4];
} int64x1x4_t;
typedef struct int64x2x4_t {
int64x2_t val[4];
} int64x2x4_t;
typedef struct uint8x8x4_t {
uint8x8_t val[4];
} uint8x8x4_t;
typedef struct uint8x16x4_t {
uint8x16_t val[4];
} uint8x16x4_t;
typedef struct uint16x4x4_t {
uint16x4_t val[4];
} uint16x4x4_t;
typedef struct uint16x8x4_t {
uint16x8_t val[4];
} uint16x8x4_t;
typedef struct uint32x2x4_t {
uint32x2_t val[4];
} uint32x2x4_t;
typedef struct uint32x4x4_t {
uint32x4_t val[4];
} uint32x4x4_t;
typedef struct uint64x1x4_t {
uint64x1_t val[4];
} uint64x1x4_t;
typedef struct uint64x2x4_t {
uint64x2_t val[4];
} uint64x2x4_t;
typedef struct float16x4x4_t {
float16x4_t val[4];
} float16x4x4_t;
typedef struct float16x8x4_t {
float16x8_t val[4];
} float16x8x4_t;
typedef struct float32x2x4_t {
float32x2_t val[4];
} float32x2x4_t;
typedef struct float32x4x4_t {
float32x4_t val[4];
} float32x4x4_t;
#ifdef __aarch64__
typedef struct float64x1x4_t {
float64x1_t val[4];
} float64x1x4_t;
typedef struct float64x2x4_t {
float64x2_t val[4];
} float64x2x4_t;
#endif
typedef struct poly8x8x4_t {
poly8x8_t val[4];
} poly8x8x4_t;
typedef struct poly8x16x4_t {
poly8x16_t val[4];
} poly8x16x4_t;
typedef struct poly16x4x4_t {
poly16x4_t val[4];
} poly16x4x4_t;
typedef struct poly16x8x4_t {
poly16x8_t val[4];
} poly16x8x4_t;
#ifdef __aarch64__
typedef struct poly64x1x4_t {
poly64x1_t val[4];
} poly64x1x4_t;
typedef struct poly64x2x4_t {
poly64x2_t val[4];
} poly64x2x4_t;
#endif
#define __ai static inline __attribute__((__always_inline__, __nodebug__))
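/* __ai forces every intrinsic to inline to the underlying instruction with
 * no call overhead and no debug-info noise.
 *
 * Each intrinsic below is defined twice. On little-endian targets the
 * arguments feed straight into the matching __builtin_neon_* call (or a
 * plain C operator). On big-endian targets the lanes are first reversed
 * with __builtin_shufflevector into the little-endian lane order the
 * backend expects, the operation is applied, and the result is reversed
 * back. The __noswap_* variants skip that reversal so big-endian
 * definitions of compound intrinsics can call one another without
 * double-swapping. The trailing integer literal passed to each builtin
 * encodes the NEON vector type of the operands (e.g. 48 for uint8x16_t). */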
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __ret;
__ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
return __ret;
}
#else
__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __ret;
__ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __ret;
__ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
return __ret;
}
#endif
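/* Illustrative use of the vabd* family (per-lane absolute difference);
 * vdupq_n_u8 is defined later in the full header:
 *   uint8x16_t a = vdupq_n_u8(10);
 *   uint8x16_t b = vdupq_n_u8(3);
 *   uint8x16_t d = vabdq_u8(a, b);   // every lane holds |10 - 3| == 7
 */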
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
return __ret;
}
#else
__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
return __ret;
}
#else
__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __ret;
__ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
return __ret;
}
#else
__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __ret;
__ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __ret;
__ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
float32x4_t __ret;
__ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
return __ret;
}
#else
__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
float32x4_t __ret;
__ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __ret;
__ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
return __ret;
}
#else
__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int32x4_t __ret;
__ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __ret;
__ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __ret;
__ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
return __ret;
}
#else
__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __ret;
__ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
return __ret;
}
#else
__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
return __ret;
}
#else
__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
return __ret;
}
#else
__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
return __ret;
}
#else
__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
float32x2_t __ret;
__ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
return __ret;
}
#else
__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
float32x2_t __ret;
__ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
return __ret;
}
#else
__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
return __ret;
}
#else
__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
return __ret;
}
#endif
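/* vabs*: per-lane absolute value. */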
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vabsq_s8(int8x16_t __p0) {
int8x16_t __ret;
__ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
return __ret;
}
#else
__ai int8x16_t vabsq_s8(int8x16_t __p0) {
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __ret;
__ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vabsq_f32(float32x4_t __p0) {
float32x4_t __ret;
__ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
return __ret;
}
#else
__ai float32x4_t vabsq_f32(float32x4_t __p0) {
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
float32x4_t __ret;
__ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vabsq_s32(int32x4_t __p0) {
int32x4_t __ret;
__ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
return __ret;
}
#else
__ai int32x4_t vabsq_s32(int32x4_t __p0) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int32x4_t __ret;
__ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vabsq_s16(int16x8_t __p0) {
int16x8_t __ret;
__ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
return __ret;
}
#else
__ai int16x8_t vabsq_s16(int16x8_t __p0) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vabs_s8(int8x8_t __p0) {
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
return __ret;
}
#else
__ai int8x8_t vabs_s8(int8x8_t __p0) {
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x2_t vabs_f32(float32x2_t __p0) {
float32x2_t __ret;
__ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
return __ret;
}
#else
__ai float32x2_t vabs_f32(float32x2_t __p0) {
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
float32x2_t __ret;
__ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vabs_s32(int32x2_t __p0) {
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
return __ret;
}
#else
__ai int32x2_t vabs_s32(int32x2_t __p0) {
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vabs_s16(int16x4_t __p0) {
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
return __ret;
}
#else
__ai int16x4_t vabs_s16(int16x4_t __p0) {
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
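/* vadd*: per-lane addition, expressed with the C '+' operator, which Clang
 * lowers directly to the vector add. For the single-lane 64x1 types the
 * big-endian definition is identical to the little-endian one, since a
 * one-element vector has no lanes to swap. */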
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint64x2_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
float32x4_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
float32x4_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int32x4_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
int64x2_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int64x2_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint32x2_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
uint64x1_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
uint64x1_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint16x4_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
float32x2_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
float32x2_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int32x2_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
int64x1_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
int64x1_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int16x4_t __ret;
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
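/* vaddhn*: add and take the high (narrowing) half. Each pair of source
 * lanes is added at full width and the upper half of every sum is returned
 * in a vector of half-width lanes, e.g. vaddhn_u32 adds two uint32x4_t
 * values and yields the top 16 bits of each 32-bit sum as a uint16x4_t. */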
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
return __ret;
}
#else
__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
return __ret;
}
#else
__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
return __ret;
}
#else
__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
return __ret;
}
#else
__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
return __ret;
}
#else
__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
return __ret;
}
#else
__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
return __ret;
}
#endif
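/* vand*: per-lane bitwise AND, expressed with the C '&' operator. */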
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint64x2_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int32x4_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
int64x2_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int64x2_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint32x2_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
uint64x1_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
uint64x1_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint16x4_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int32x2_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
int64x1_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
int64x1_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __ret;
__ret = __p0 & __p1;
return __ret;
}
#else
__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int16x4_t __ret;
__ret = __rev0 & __rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
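/* vbic*: bit clear, computing __p0 & ~__p1 per lane. */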
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint64x2_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int32x4_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
int64x2_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int64x2_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint32x2_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
uint64x1_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
uint64x1_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint16x4_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int32x2_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
int64x1_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
int64x1_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __ret;
__ret = __p0 & ~__p1;
return __ret;
}
#else
__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int16x4_t __ret;
__ret = __rev0 & ~__rev1;
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
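/* vbsl*: bitwise select. Each result bit is taken from __p1 where the
 * corresponding bit of the mask __p0 is set, and from __p2 where it is
 * clear. Illustrative use (vdupq_n_u8 is defined later in the full
 * header):
 *   uint8x16_t mask = vdupq_n_u8(0x0F);
 *   uint8x16_t r = vbslq_u8(mask, a, b);
 * Here the low nibble of each lane comes from a, the high nibble from b. */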
#ifdef __LITTLE_ENDIAN__
__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
poly8x8_t __ret;
__ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
return __ret;
}
#else
__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
poly8x8_t __ret;
__ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
poly16x4_t __ret;
__ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
return __ret;
}
#else
__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
poly16x4_t __ret;
__ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
poly8x16_t __ret;
__ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
return __ret;
}
#else
__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
poly8x16_t __ret;
__ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
poly16x8_t __ret;
__ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
return __ret;
}
#else
__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
poly16x8_t __ret;
__ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
uint8x16_t __ret;
__ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
return __ret;
}
#else
__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __ret;
__ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
return __ret;
}
#else
__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
return __ret;
}
#else
__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
return __ret;
}
#else
__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
int8x16_t __ret;
__ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
return __ret;
}
#else
__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __ret;
__ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
float32x4_t __ret;
__ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
return __ret;
}
#else
__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
float32x4_t __ret;
__ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
int32x4_t __ret;
__ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
return __ret;
}
#else
__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
int32x4_t __ret;
__ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
int64x2_t __ret;
__ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
return __ret;
}
#else
__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
int64x2_t __ret;
__ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
int16x8_t __ret;
__ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
return __ret;
}
#else
__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
return __ret;
}
#else
__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
return __ret;
}
#else
__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
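/*
 * Single-lane vectors (uint64x1_t here, and int64x1_t for vbsl_s64 below)
 * have no lane order to reverse, so the big-endian path is identical to
 * the little-endian one.
 */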
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
return __ret;
}
#else
__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
return __ret;
}
#else
__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
return __ret;
}
#else
__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __ret;
__ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
float32x2_t __ret;
__ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
return __ret;
}
#else
__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
float32x2_t __ret;
__ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
return __ret;
}
#else
__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
int32x2_t __ret;
__ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
int64x1_t __ret;
__ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
return __ret;
}
#else
__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
int64x1_t __ret;
__ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
return __ret;
}
#else
__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
int16x4_t __ret;
__ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
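/*
 * vcage (absolute compare greater than or equal): compares |__p0| >= |__p1|
 * lane-wise on floating-point inputs and returns an integer mask vector
 * with all bits set in each lane where the comparison holds.
 *
 * Illustrative use (names are placeholders):
 *   uint32x4_t m = vcageq_f32(x, y); // lane = 0xFFFFFFFF where |x| >= |y|
 */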
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);